Results 1 - 10 of 24 for traceRelease (0.14 sec)

  1. src/runtime/debugcall.go

    			trace.GoSched()
    		}
    		casgstatus(gp, _Grunning, _Grunnable)
    		if trace.ok() {
    			traceRelease(trace)
    		}
    		dropg()
    		lock(&sched.lock)
    		globrunqput(gp)
    		unlock(&sched.lock)
    
    		trace = traceAcquire()
    		casgstatus(callingG, _Gwaiting, _Grunnable)
    		if trace.ok() {
    			trace.GoUnpark(callingG, 0)
    			traceRelease(trace)
    		}
    		execute(callingG, true)
    	})
    }
    
    - Last Modified: Fri Apr 05 20:50:21 UTC 2024
    - 7.1K bytes
  2. src/runtime/mcentral.go

    		trace.GCSweepDone()
    		traceDone = true
    		traceRelease(trace)
    	}
    
    	// We failed to get a span from the mcentral so get one from mheap.
    	s = c.grow()
    	if s == nil {
    		return nil
    	}
    
    	// At this point s is a span that should have free slots.
    havespan:
    	if !traceDone {
    		trace := traceAcquire()
    		if trace.ok() {
    			trace.GCSweepDone()
    			traceRelease(trace)
    		}
    	}
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 8.1K bytes
  3. src/runtime/traceruntime.go

    //
    //go:nosplit
    func (tl traceLocker) ok() bool {
    	return tl.gen != 0
    }
    
    // traceRelease indicates that this M is done writing trace events.
    //
    // nosplit because it's called on the syscall path when stack movement is forbidden.
    //
    //go:nosplit
    func traceRelease(tl traceLocker) {
    	seq := tl.mp.trace.seqlock.Add(1)
    	if debugTraceReentrancy && seq%2 != 0 {
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 25.7K bytes
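    The traceruntime.go snippet above shows traceRelease advancing a per-M seqlock and checking that the new value is even. As a rough, self-contained analogue of that acquire/release bracket (the eventWriter type and its method names are illustrative stand-ins, not the runtime's real API), the pattern seen throughout these results can be sketched as:

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // eventWriter is a toy stand-in for per-M trace state: gen is nonzero only
    // while tracing is enabled, and seq is odd while events are being written.
    type eventWriter struct {
    	gen uint64
    	seq atomic.Uint64
    }

    // acquire plays the role of traceAcquire plus ok(): it reports whether
    // tracing is enabled and, if so, flips the seqlock to odd.
    func (w *eventWriter) acquire() bool {
    	if w.gen == 0 {
    		return false // tracing disabled; caller skips event writing
    	}
    	if w.seq.Add(1)%2 != 1 {
    		panic("acquire: reentrant or unbalanced acquire")
    	}
    	return true
    }

    // release plays the role of traceRelease: it flips the seqlock back to
    // even, signalling that this writer is done emitting events.
    func (w *eventWriter) release() {
    	if w.seq.Add(1)%2 != 0 {
    		panic("release: unbalanced release")
    	}
    }

    func main() {
    	w := &eventWriter{gen: 1}
    	if w.acquire() {
    		fmt.Println("write event") // stand-in for trace.GoSched(), trace.GCSweepDone(), ...
    		w.release()
    	}
    }

    The odd/even parity here plays the role of the seqlock in the snippet: an odd value means an event writer is active, and an unbalanced or reentrant acquire trips the check, much like the debugTraceReentrancy check shown in traceRelease above.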
  4. src/runtime/trace.go

    		tl.Gomaxprocs(gomaxprocs)
    		traceRelease(tl)
    	}
    
    	// Emit a GCActive event in the new generation if necessary.
    	//
    	// It's important that we do this before allowing stop-the-worlds again,
    	// because that could emit global GC-related events.
    	if !stopTrace && (gcphase == _GCmark || gcphase == _GCmarktermination) {
    		tl := traceAcquire()
    		tl.GCActive()
    		traceRelease(tl)
    	}
    
    - Last Modified: Wed May 22 21:17:41 UTC 2024
    - 37.1K bytes
  5. src/runtime/proc.go

    			if trace.ok() {
    				// It's important that we traceRelease before we call handoffp, which may also traceAcquire.
    				trace.ProcSteal(p2, false)
    				traceRelease(trace)
    			}
    			p2.syscalltick++
    			handoffp(p2)
    		} else if trace.ok() {
    			traceRelease(trace)
    		}
    	}
    
    	// Wait for remaining Ps to run fn.
    	if wait {
    		for {
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 207.5K bytes
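    The comment in this proc.go snippet pins down an ordering rule: the trace locker is released before handoffp runs, because handoffp may itself call traceAcquire. A minimal sketch of that discipline, using a plain sync.Mutex as a stand-in for the non-reentrant trace locker (the handoff and stealAndHandoff names are invented for illustration), looks like:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // traceLock stands in for the trace locker; like it, sync.Mutex is not
    // reentrant, so acquiring it again before releasing would deadlock.
    var traceLock sync.Mutex

    // handoff stands in for handoffp: it emits its own event, so it takes
    // the lock itself.
    func handoff() {
    	traceLock.Lock()
    	fmt.Println("handoff: emit event")
    	traceLock.Unlock()
    }

    func stealAndHandoff() {
    	traceLock.Lock()
    	fmt.Println("steal: emit ProcSteal-style event")
    	traceLock.Unlock() // release first, as the comment above requires...
    	handoff()          // ...because the callee acquires the lock again
    }

    func main() {
    	stealAndHandoff()
    }

    Swapping the Unlock and the handoff call would deadlock in this sketch, which is the failure mode the ordering comment guards against.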
  6. src/runtime/tracecpu.go

    			// reads to avoid frequent wakeups.
    			trace.cpuSleep.sleep(100_000_000)
    
    			tl := traceAcquire()
    			if !tl.ok() {
    				// Tracing disabled.
    				break
    			}
    			keepGoing := traceReadCPU(tl.gen)
    			traceRelease(tl)
    			if !keepGoing {
    				break
    			}
    		}
    		done <- struct{}{}
    	}()
    	trace.cpuLogDone = done
    }
    
    // traceStopReadCPU blocks until the trace CPU reading goroutine exits.
    //
    - Last Modified: Mon Apr 15 17:03:35 UTC 2024
    - 8.7K bytes
  7. src/runtime/traceallocfree.go

    			}
    			abits.advance()
    		}
    	}
    
    	// Write out all the goroutine stacks.
    	forEachGRace(func(gp *g) {
    		trace.GoroutineStackExists(gp.stack.lo, gp.stack.hi-gp.stack.lo)
    	})
    	traceRelease(trace)
    }
    
    func traceSpanTypeAndClass(s *mspan) traceArg {
    	if s.state.get() == mSpanInUse {
    		return traceArg(s.spanclass) << 1
    	}
    	return traceArg(1)
    }
    
    - Last Modified: Wed May 22 20:32:51 UTC 2024
    - 5.9K bytes
  8. src/runtime/mgcsweep.go

    		throw("mspan.sweep: bad span state")
    	}
    
    	trace := traceAcquire()
    	if trace.ok() {
    		trace.GCSweepSpan(s.npages * _PageSize)
    		traceRelease(trace)
    	}
    
    	mheap_.pagesSwept.Add(int64(s.npages))
    
    	spc := s.spanclass
    	size := s.elemsize
    
    	// The allocBits indicate which unmarked objects don't need to be
    - Last Modified: Wed May 08 17:52:18 UTC 2024
    - 32.9K bytes
  9. src/runtime/coro.go

    	}
    
    	// Donate locked state.
    	if locked {
    		mp.lockedg.set(gnext)
    		gnext.lockedm.set(mp)
    	}
    
    	// Release the trace locker. We've completed all the necessary transitions.
    	if trace.ok() {
    		traceRelease(trace)
    	}
    
    	// Switch to gnext. Does not return.
    	gogo(&gnext.sched)
    - Last Modified: Fri Jun 07 19:09:18 UTC 2024
    - 7.4K bytes
  10. src/runtime/mheap.go

    	// sweeping, so we can read h.sweepArenas, and so
    	// traceGCSweepStart/Done pair on the P.
    	mp := acquirem()
    
    	trace := traceAcquire()
    	if trace.ok() {
    		trace.GCSweepStart()
    		traceRelease(trace)
    	}
    
    	arenas := h.sweepArenas
    	locked := false
    	for npage > 0 {
    		// Pull from accumulated credit first.
    		if credit := h.reclaimCredit.Load(); credit > 0 {
    			take := credit
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes