Results 11 - 20 of 21 for raceAcquire (0.14 sec)
- src/runtime/traceruntime.go

    // not inlineable, and we want traceAcquire to be inlineable for
    // low overhead when the trace is disabled.
    const debugTraceReentrancy = false

    // traceAcquire prepares this M for writing one or more trace events.
    //
    // nosplit because it's called on the syscall path when stack movement is forbidden.
    //
    //go:nosplit
    func traceAcquire() traceLocker {
        if !traceEnabled() {
            return traceLocker{}
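This result shows the shape of the API: traceAcquire returns a traceLocker whose zero value reports ok() == false, so callers on the disabled path pay only the traceEnabled check. Below is a minimal, self-contained sketch of that acquire/ok/release discipline; miniTracer and miniLocker are hypothetical stand-ins, not the runtime's actual types:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // miniTracer stands in for the runtime's global trace state.
    type miniTracer struct {
        enabled atomic.Bool
    }

    // miniLocker mirrors traceLocker: its zero value reports ok() == false,
    // so a disabled tracer costs callers one atomic load and a branch.
    type miniLocker struct {
        t *miniTracer
    }

    func (l miniLocker) ok() bool { return l.t != nil }

    // acquire mirrors traceAcquire: return the zero locker when disabled.
    func (t *miniTracer) acquire() miniLocker {
        if !t.enabled.Load() {
            return miniLocker{}
        }
        return miniLocker{t: t}
    }

    // release mirrors traceRelease; the real runtime unpins the M here.
    func (l miniLocker) release() {}

    func main() {
        var t miniTracer
        t.enabled.Store(true)

        if tl := t.acquire(); tl.ok() {
            fmt.Println("emit trace event")
            tl.release()
        }
    }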
- src/runtime/tracecpu.go

            // Like the runtime/pprof package, even if that bug didn't exist
            // we would still want to do a goroutine-level sleep in between
            // reads to avoid frequent wakeups.
            trace.cpuSleep.sleep(100_000_000)
            tl := traceAcquire()
            if !tl.ok() {
                // Tracing disabled.
                break
            }
            keepGoing := traceReadCPU(tl.gen)
            traceRelease(tl)
            if !keepGoing {
                break
            }
        }
        done <- struct{}{}
    }()
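The 100_000_000 passed to sleep is nanoseconds, i.e. a 100ms pause between reads of the CPU profile buffer. A sketch of the same loop shape, with a hypothetical readSamples standing in for traceReadCPU and the trace-enabled check elided:

    package main

    import (
        "fmt"
        "time"
    )

    // readSamples is a hypothetical stand-in for traceReadCPU; like the
    // keepGoing result above, it reports whether more data may remain.
    func readSamples(remaining *int) bool {
        *remaining--
        fmt.Println("flushed a batch of CPU samples")
        return *remaining > 0
    }

    func main() {
        remaining := 3
        done := make(chan struct{})
        go func() {
            for {
                // Sleep between reads to avoid frequent wakeups,
                // mirroring trace.cpuSleep.sleep(100_000_000).
                time.Sleep(100 * time.Millisecond)
                if keepGoing := readSamples(&remaining); !keepGoing {
                    break
                }
            }
            done <- struct{}{}
        }()
        <-done
    }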
- src/runtime/proc.go

    // might call into the tracer, and the tracer is non-reentrant.
    trace := traceAcquire()
    if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
        if trace.ok() {
            // It's important that we traceRelease before we call handoffp, which may also traceAcquire.
            trace.ProcSteal(p2, false)
            traceRelease(trace)
        }
        p2.syscalltick++
        handoffp(p2)
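The comment in this result states the key constraint: the tracer is non-reentrant, so the locker must be released before calling handoffp, which may acquire it again on the same thread. A toy demonstration of the same discipline with a plain sync.Mutex (also non-reentrant); moving the Unlock below the handoff call would self-deadlock:

    package main

    import (
        "fmt"
        "sync"
    )

    // tracer stands in for the runtime's non-reentrant trace lock.
    var tracer sync.Mutex

    func emitProcSteal() {
        tracer.Lock()
        fmt.Println("ProcSteal event")
        // Release BEFORE handoff: handoff acquires tracer again, and a
        // sync.Mutex already held by this goroutine would deadlock.
        tracer.Unlock()
        handoff()
    }

    func handoff() {
        tracer.Lock()
        fmt.Println("handoff event")
        tracer.Unlock()
    }

    func main() { emitProcSteal() }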
- src/runtime/traceallocfree.go

    w.varint(uint64(pageSize))
    w.varint(uint64(minHeapAlign))
    w.varint(uint64(fixedStack))

    // Finish writing the batch.
    w.flush().end()

    // Start tracing.
    trace := traceAcquire()
    if !trace.ok() {
        throw("traceSnapshotMemory: tracing is not enabled")
    }

    // Write out all the heap spans and heap objects.
    for _, s := range mheap_.allspans {
        if s.state.get() == mSpanDead {
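w.varint here writes unsigned base-128 varints into the batch header. Assuming the trace wire format's varint matches the unsigned LEB128 encoding in encoding/binary (an assumption, not confirmed by this result), the same header bytes can be sketched with AppendUvarint; the three constants are illustrative values, not the runtime's:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        var batch []byte
        // Illustrative stand-ins for pageSize, minHeapAlign, fixedStack.
        for _, v := range []uint64{8192, 8, 2048} {
            batch = binary.AppendUvarint(batch, v) // unsigned LEB128
        }
        fmt.Printf("batch header: % x\n", batch) // 80 40 08 80 10
    }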
- src/runtime/mgcsweep.go

        print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
        throw("mspan.sweep: bad span state")
    }
    trace := traceAcquire()
    if trace.ok() {
        trace.GCSweepSpan(s.npages * _PageSize)
        traceRelease(trace)
    }
    mheap_.pagesSwept.Add(int64(s.npages))
    spc := s.spanclass
    size := s.elemsize
- src/cmd/compile/internal/test/inl_test.go

        // internal/runtime/atomic.Loaduintptr is only intrinsified on these platforms.
        want["runtime"] = append(want["runtime"], "traceAcquire")
    }
    if bits.UintSize == 64 {
        // mix is only defined on 64-bit architectures
        want["runtime"] = append(want["runtime"], "mix")
        // (*Bool).CompareAndSwap is just over budget on 32-bit systems (386, arm).
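This test pins down that traceAcquire stays within the compiler's inlining budget, matching the "we want traceAcquire to be inlineable" comment in traceruntime.go above. Outside the runtime's test harness, inlining decisions can be inspected with -gcflags=-m; a small sketch that shells out to the go tool and filters its diagnostics (assumes a Go toolchain on PATH and a buildable package in the current directory):

    package main

    import (
        "fmt"
        "os/exec"
        "strings"
    )

    func main() {
        // -gcflags=-m makes the compiler print inlining decisions,
        // e.g. "can inline f" or "inline call to f".
        out, _ := exec.Command("go", "build", "-gcflags=-m", ".").CombinedOutput()
        for _, line := range strings.Split(string(out), "\n") {
            if strings.Contains(line, "can inline") {
                fmt.Println(line)
            }
        }
    }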
- src/runtime/coro.go

    // but we need to make sure the tracer can only observe the
    // start and end states to maintain a coherent model and avoid
    // emitting an event for every single transition.
    trace := traceAcquire()
    if locked {
        // Detach the goroutine from the thread; we'll attach to the goroutine we're
        // switching to before returning.
        gp.lockedm.set(nil)
    }
    if exit {
- src/runtime/mgcmark.go

    retry:
        if gcCPULimiter.limiting() {
            // If the CPU limiter is enabled, intentionally don't
            // assist to reduce the amount of CPU time spent in the GC.
            if enteredMarkAssistForTracing {
                trace := traceAcquire()
                if trace.ok() {
                    trace.GCMarkAssistDone()
                    // Set this *after* we trace the end to make sure
                    // that we emit an in-progress event if this is
                    // the first event for the goroutine in the trace
- src/runtime/mheap.go

        return
    }
    // Disable preemption so the GC can't start while we're
    // sweeping, so we can read h.sweepArenas, and so
    // traceGCSweepStart/Done pair on the P.
    mp := acquirem()
    trace := traceAcquire()
    if trace.ok() {
        trace.GCSweepStart()
        traceRelease(trace)
    }
    arenas := h.sweepArenas
    locked := false
    for npage > 0 {
        // Pull from accumulated credit first.
- src/runtime/mgc.go

    semacquire(&worldsema)

    // For stats, check if this GC was forced by the user.
    // Update it under gcsema to avoid gctrace getting wrong values.
    work.userForced = trigger.kind == gcTriggerCycle

    trace := traceAcquire()
    if trace.ok() {
        trace.GCStart()
        traceRelease(trace)
    }

    // Check that all Ps have finished deferred mcache flushes.
    for _, p := range allp {