- Sort Score
- Results 10 results
- Languages All
Results 1 - 4 of 4 for _GCoff (0.12 sec)
-
src/runtime/mgc.go
setGCPhase(_GCoff) stwSwept = gcSweep(work.mode) }) mp.traceback = 0 casgstatus(curgp, _Gwaiting, _Grunning) trace := traceAcquire() if trace.ok() { trace.GCDone() traceRelease(trace) } // all done mp.preemptoff = "" if gcphase != _GCoff { throw("gc done but gcphase != _GCoff") }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 62K bytes - Viewed (0) -
src/runtime/mheap.go
// // - A span may transition from free to in-use or manual during any GC // phase. // // - During sweeping (gcphase == _GCoff), a span may transition from // in-use to free (as a result of sweeping) or manual to free (as a // result of stacks being freed). // // - During GC (gcphase != _GCoff), a span *must not* transition from // manual or in-use to free. Because concurrent GC may read a pointer
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0) -
src/runtime/mgcpacer.go
// //go:systemstack func gcControllerCommit() { assertWorldStoppedOrLockHeld(&mheap_.lock) gcController.commit(isSweepDone()) // Update mark pacing. if gcphase != _GCoff { gcController.revise() } // TODO(mknyszek): This isn't really accurate any longer because the heap // goal is computed dynamically. Still useful to snapshot, but not as useful. trace := traceAcquire()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 55.4K bytes - Viewed (0) -
src/runtime/malloc.go
// Allocate black during GC. // All slots hold nil so no scanning is needed. // This may be racing with GC so do it atomically if there can be // a race marking the bit. if gcphase != _GCoff { gcmarknewobject(span, uintptr(x)) } if raceenabled { racemalloc(x, size) } if msanenabled { msanmalloc(x, size) } if asanenabled {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0)