- Sort Score
- Results per page: 10
- Languages All
Results 1 - 4 of 4 for raceacquireg (0.46 sec)
-
src/runtime/mprof.go
// goroutineProfiled field cleared. forEachGRace(func(gp1 *g) { gp1.goroutineProfiled.Store(goroutineProfileAbsent) }) if raceenabled { raceacquire(unsafe.Pointer(&labelSync)) } if n != int(endOffset) { // It's a big surprise that the number of goroutines changed while we // were collecting the profile. But probably better to return a
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:57:37 UTC 2024 - 53.3K bytes - Viewed (0) -
src/runtime/mheap.go
return } // Disable preemption so the GC can't start while we're // sweeping, so we can read h.sweepArenas, and so // traceGCSweepStart/Done pair on the P. mp := acquirem() trace := traceAcquire() if trace.ok() { trace.GCSweepStart() traceRelease(trace) } arenas := h.sweepArenas locked := false for npage > 0 { // Pull from accumulated credit first.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0) -
src/runtime/mgc.go
semacquire(&worldsema) // For stats, check if this GC was forced by the user. // Update it under gcsema to avoid gctrace getting wrong values. work.userForced = trigger.kind == gcTriggerCycle trace := traceAcquire() if trace.ok() { trace.GCStart() traceRelease(trace) } // Check that all Ps have finished deferred mcache flushes. for _, p := range allp {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 62K bytes - Viewed (0) -
src/runtime/malloc.go
// Init functions are executed sequentially in a single goroutine. inittrace.bytes += uint64(fullSize) } if traceAllocFreeEnabled() { trace := traceAcquire() if trace.ok() { trace.HeapObjectAlloc(uintptr(x), typ) traceRelease(trace) } } } if assistG != nil { // Account for internal fragmentation in the assist
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0)