Results 1 - 6 of 6 for forEachGRace (0.16 sec)

  1. src/runtime/traceallocfree.go

    				x := s.base() + i*s.elemsize
    				trace.HeapObjectExists(x, s.typePointersOfUnchecked(x).typ)
    			}
    			abits.advance()
    		}
    	}
    
    	// Write out all the goroutine stacks.
    	forEachGRace(func(gp *g) {
    		trace.GoroutineStackExists(gp.stack.lo, gp.stack.hi-gp.stack.lo)
    	})
    	traceRelease(trace)
    }
    
    func traceSpanTypeAndClass(s *mspan) traceArg {
    	if s.state.get() == mSpanInUse {
    - Last Modified: Wed May 22 20:32:51 UTC 2024
    - 5.9K bytes
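
    A minimal way to reach the snapshot path above from user code is the
    public runtime/trace API, sketched below. Note the alloc/free events
    themselves belong to an internal tracing experiment, and the knob that
    enables it is not shown in the excerpt.

    	package main

    	import (
    		"log"
    		"os"
    		"runtime/trace"
    	)

    	func main() {
    		f, err := os.Create("trace.out")
    		if err != nil {
    			log.Fatal(err)
    		}
    		defer f.Close()

    		// Starting a trace snapshots existing state; with the
    		// experiment enabled, that includes one GoroutineStackExists
    		// event per G found by the forEachGRace loop above.
    		if err := trace.Start(f); err != nil {
    			log.Fatal(err)
    		}
    		defer trace.Stop()

    		// ... traced workload ...
    	}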
  2. src/runtime/mprof.go

    	//
    	// Any goroutine that the scheduler tries to execute concurrently with this
    	// call will start by adding itself to the profile (before the act of
    	// executing can cause any changes in its stack).
    	forEachGRace(func(gp1 *g) {
    		tryRecordGoroutineProfile(gp1, pcbuf, Gosched)
    	})
    
    	stw = stopTheWorld(stwGoroutineProfileCleanup)
    	endOffset := goroutineProfile.offset.Swap(0)
    	goroutineProfile.active = false
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 53.3K bytes
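
    runtime.GoroutineProfile is the public API that drives this collection
    path. A minimal caller using the usual two-call pattern (size the
    buffer first, then fill it):

    	package main

    	import (
    		"fmt"
    		"runtime"
    	)

    	func main() {
    		n, _ := runtime.GoroutineProfile(nil)       // first call reports the count
    		records := make([]runtime.StackRecord, n+8) // headroom: goroutines may appear in between
    		n, ok := runtime.GoroutineProfile(records)
    		if !ok {
    			fmt.Println("buffer too small; retry with a larger one")
    			return
    		}
    		fmt.Printf("captured %d goroutine stacks\n", n)
    	}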
  3. src/runtime/traceback.go

    	// throw/panic, where locking could be out-of-order or a direct
    	// deadlock.
    	//
    	// Instead, use forEachGRace, which requires no locking. We don't lock
    	// against concurrent creation of new Gs, but even with allglock we may
    	// miss Gs created after this loop.
    	forEachGRace(func(gp *g) {
    		if gp == me || gp == curgp || readgstatus(gp) == _Gdead || isSystemGoroutine(gp, false) && level < 2 {
    			return
    		}
    - Last Modified: Wed May 29 16:25:21 UTC 2024
    - 55.1K bytes
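
    This loop prints the other goroutines' stacks during a fatal panic;
    the level < 2 test matches the GOTRACEBACK levels, under which system
    goroutines only appear at "system" and above. A small program that
    exercises it, assuming only the documented runtime/debug API:

    	package main

    	import (
    		"runtime/debug"
    		"time"
    	)

    	func main() {
    		debug.SetTraceback("all")              // same effect as GOTRACEBACK=all
    		go func() { time.Sleep(time.Hour) }()  // a parked goroutine for the dump
    		time.Sleep(10 * time.Millisecond)      // let it start
    		panic("dump the stacks of all goroutines")
    	}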
  4. src/runtime/trace.go

    		gp           *g
    		goid         uint64
    		mid          int64
    		stackID      uint64
    		status       uint32
    		waitreason   waitReason
    		inMarkAssist bool
    	}
    	var untracedGs []untracedG
    	forEachGRace(func(gp *g) {
    		// Make absolutely sure all Gs are ready for the next
    		// generation. We need to do this even for dead Gs because
    		// they may come alive with a new identity, and its status
    - Last Modified: Wed May 22 21:17:41 UTC 2024
    - 37.1K bytes
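
    The untracedG struct copies the fields the tracer needs out of each
    live *g during the racy walk, so later processing never touches the
    concurrently-mutated object. A sketch of that snapshot pattern, with
    invented stand-in names (worker for g, untracedWorker for untracedG):

    	package main

    	import "fmt"

    	type worker struct { // stands in for the runtime's g
    		id     uint64
    		status uint32
    	}

    	type untracedWorker struct { // stands in for untracedG
    		w      *worker
    		id     uint64
    		status uint32
    	}

    	func main() {
    		live := []*worker{{id: 1, status: 4}, {id: 2, status: 2}}

    		// Copy racy state into plain values, as the snippet does.
    		var untraced []untracedWorker
    		for _, w := range live { // stands in for forEachGRace
    			untraced = append(untraced, untracedWorker{w: w, id: w.id, status: w.status})
    		}

    		for _, u := range untraced {
    			fmt.Printf("worker %d had status %d at snapshot time\n", u.id, u.status)
    		}
    	}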
  5. src/runtime/mgcmark.go

    	//
    	// We only check the first nStackRoots Gs that we should have scanned.
    	// Since we don't care about newer Gs (see comment in
    	// gcMarkRootPrepare), no locking is required.
    	i := 0
    	forEachGRace(func(gp *g) {
    		if i >= work.nStackRoots {
    			return
    		}
    
    		if !gp.gcscandone {
    			println("gp", gp, "goid", gp.goid,
    				"status", readgstatus(gp),
    				"gcscandone", gp.gcscandone)
    - Last Modified: Thu Apr 18 21:25:11 UTC 2024
    - 52.5K bytes
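
    This appears to be a debug-time consistency check over the first
    nStackRoots Gs. The stack scanning it verifies can be exercised by
    forcing a collection; GODEBUG=gccheckmark=1 turns on the GC's mark
    verification, though whether this particular assertion is gated on
    it is not visible in the excerpt.

    	package main

    	import "runtime"

    	func main() {
    		// Force a full collection: the mark phase scans every goroutine
    		// stack as a root, which is what the gcscandone flag records.
    		runtime.GC()
    	}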
  6. src/runtime/proc.go

    func forEachG(fn func(gp *g)) {
    	lock(&allglock)
    	for _, gp := range allgs {
    		fn(gp)
    	}
    	unlock(&allglock)
    }
    
    // forEachGRace calls fn on every G from allgs.
    //
    // forEachGRace avoids locking, but does not exclude addition of new Gs during
    // execution, which may be missed.
    func forEachGRace(fn func(gp *g)) {
    	ptr, length := atomicAllG()
    	for i := uintptr(0); i < length; i++ {
    		gp := atomicAllGIndex(ptr, i)
    		fn(gp)
    	}
    }
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 207.5K bytes
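
    The doc comment is explicit about the trade-off: no locking, but Gs
    added during the walk can be missed. A minimal self-contained sketch
    of that idea, collapsing the runtime's pointer/length pair into a
    single atomic pointer to an immutable slice (a simplification, not
    the runtime's actual representation):

    	package main

    	import (
    		"fmt"
    		"sync/atomic"
    	)

    	var snapshot atomic.Pointer[[]int]

    	// add publishes a new immutable snapshot containing v.
    	func add(v int) {
    		for {
    			old := snapshot.Load()
    			next := append(append([]int(nil), *old...), v) // copy, then extend
    			if snapshot.CompareAndSwap(old, &next) {
    				return
    			}
    		}
    	}

    	// forEachRace visits a snapshot taken once, without a lock; values
    	// added after the Load are missed, mirroring forEachGRace.
    	func forEachRace(fn func(int)) {
    		for _, v := range *snapshot.Load() {
    			fn(v)
    		}
    	}

    	func main() {
    		empty := []int{}
    		snapshot.Store(&empty)
    		add(1)
    		add(2)
    		forEachRace(func(v int) { fmt.Println(v) })
    	}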