Results 1 - 9 of 9 for _GCoff (0.46 sec)
src/runtime/mgc.go
    setGCPhase(_GCoff)
    stwSwept = gcSweep(work.mode)
})

mp.traceback = 0
casgstatus(curgp, _Gwaiting, _Grunning)

trace := traceAcquire()
if trace.ok() {
    trace.GCDone()
    traceRelease(trace)
}

// all done
mp.preemptoff = ""

if gcphase != _GCoff {
    throw("gc done but gcphase != _GCoff")
}
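
This hit is the tail of mark termination: the phase is flipped back to _GCoff, sweeping is kicked off, and the function ends by asserting that the phase is still _GCoff. Below is a minimal standalone sketch of that shape; phase, finishCycle, and sweep are hypothetical names, not runtime code.

package main

// phase stands in for the runtime's gcphase; phaseOff mirrors _GCoff.
type phase int

const (
    phaseOff phase = iota
    phaseMark
)

var gcPhase = phaseMark

// finishCycle flips the phase back to off, runs the final sweep step, and
// then asserts the invariant, mirroring the check in mgc.go above.
func finishCycle() {
    gcPhase = phaseOff
    sweep()
    if gcPhase != phaseOff {
        panic("gc done but phase is not off")
    }
}

func sweep() { /* reclaim unmarked memory here */ }

func main() { finishCycle() }
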
src/runtime/arena.go
// Make ourselves non-preemptible as we manipulate state and statistics.
//
// Also required by setUserArenaChunksToFault.
mp := acquirem()

// We can only set user arenas to fault if we're in the _GCoff phase.
if gcphase == _GCoff {
    lock(&userArenaState.lock)
    faultList := userArenaState.fault
    userArenaState.fault = nil
    unlock(&userArenaState.lock)

    s.setUserArenaChunkToFault()
    for _, lc := range faultList {
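
The pattern here is swap-then-process: hold userArenaState.lock only long enough to steal the fault list, then do the slow per-chunk work unlocked. A minimal sketch of that pattern, with hypothetical names (chunk, processPending):

package main

import "sync"

// chunk stands in for a user arena chunk; all names here are hypothetical.
type chunk struct{ id int }

var state struct {
    mu      sync.Mutex
    pending []*chunk // chunks waiting to be set to fault
}

// processPending steals the whole pending list under the lock, then does the
// slow per-chunk work without holding it, as arena.go does with faultList.
func processPending() {
    state.mu.Lock()
    list := state.pending
    state.pending = nil
    state.mu.Unlock()

    for _, c := range list {
        _ = c // set the chunk to fault here, outside the critical section
    }
}

func main() { processPending() }
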
src/runtime/mheap.go
//
// - A span may transition from free to in-use or manual during any GC
//   phase.
//
// - During sweeping (gcphase == _GCoff), a span may transition from
//   in-use to free (as a result of sweeping) or manual to free (as a
//   result of stacks being freed).
//
// - During GC (gcphase != _GCoff), a span *must not* transition from
//   manual or in-use to free. Because concurrent GC may read a pointer
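
These rules read as a small transition table keyed on whether the GC is off. A standalone sketch encoding them, with hypothetical names (spanState, transitionAllowed), not the runtime's representation:

package main

import "fmt"

type spanState int

const (
    spanFree spanState = iota
    spanInUse
    spanManual
)

// transitionAllowed encodes the three rules above: allocation out of free is
// legal in any phase, but a span may only become free while sweeping, i.e.
// while gcphase == _GCoff (gcOff here).
func transitionAllowed(gcOff bool, from, to spanState) bool {
    if from == spanFree {
        return to == spanInUse || to == spanManual
    }
    if to == spanFree {
        return gcOff
    }
    return false
}

func main() {
    fmt.Println(transitionAllowed(true, spanInUse, spanFree))  // true: sweeping may free
    fmt.Println(transitionAllowed(false, spanInUse, spanFree)) // false: marking must not
}
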
src/runtime/mgcwork.go
// true if it should be called again to free more.
func freeSomeWbufs(preemptible bool) bool {
    const batchSize = 64 // ~1–2 µs per span.
    lock(&work.wbufSpans.lock)
    if gcphase != _GCoff || work.wbufSpans.free.isEmpty() {
        unlock(&work.wbufSpans.lock)
        return false
    }
    systemstack(func() {
        gp := getg().m.curg
        for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ {
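
freeSomeWbufs frees workbuf spans in bounded batches so a preemptible caller can yield between calls. A simplified sketch of the same batch-and-report pattern, with hypothetical names (pool, freeSome) and none of the runtime's lock ranking or systemstack machinery:

package main

import "sync"

// pool holds items awaiting release; all names are hypothetical.
type pool struct {
    mu   sync.Mutex
    free [][]byte
}

// freeSome releases at most batchSize items per call and reports whether it
// should be called again: a bounded batch keeps each call short, and the
// stop callback models the preemption check.
func (p *pool) freeSome(stop func() bool) bool {
    const batchSize = 64
    p.mu.Lock()
    defer p.mu.Unlock()
    for i := 0; i < batchSize && len(p.free) > 0; i++ {
        if stop != nil && stop() {
            break
        }
        p.free[len(p.free)-1] = nil // drop the reference so GC can reclaim it
        p.free = p.free[:len(p.free)-1]
    }
    return len(p.free) > 0
}

func main() {
    p := &pool{free: make([][]byte, 200)}
    for p.freeSome(nil) {
        // a real caller could yield between batches here
    }
}
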
src/runtime/stack.go
}
if s.manualFreeList.ptr() == nil {
    // s will now have a free stack
    stackpool[order].item.span.insert(s)
}
x.ptr().next = s.manualFreeList
s.manualFreeList = x
s.allocCount--
if gcphase == _GCoff && s.allocCount == 0 {
    // Span is completely free. Return it to the heap
    // immediately if we're sweeping.
    //
    // If GC is active, we delay the free until the end of
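
The check expresses free-now-or-defer: a fully empty span is returned to the heap immediately only while sweeping; during an active cycle the free is delayed. A minimal sketch of that policy under hypothetical names (span, release, endCycle):

package main

// span and the surrounding names are hypothetical, not runtime types.
type span struct{ id int }

var pendingFree []*span // spans whose free is delayed until the cycle ends

// release frees a fully empty span immediately while sweeping; if a cycle is
// active it defers the free, matching the gcphase == _GCoff check above.
func release(s *span, sweeping bool) {
    if sweeping {
        freeToHeap(s)
        return
    }
    pendingFree = append(pendingFree, s)
}

// endCycle drains the deferred frees once the cycle is over.
func endCycle() {
    for _, s := range pendingFree {
        freeToHeap(s)
    }
    pendingFree = nil
}

func freeToHeap(s *span) { /* return the span's memory to the allocator */ }

func main() {
    release(&span{id: 1}, false) // deferred: cycle active
    endCycle()
}
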
src/runtime/mfinal.go
func lockRankMayQueueFinalizer() {
    lockWithRankMayAcquire(&finlock, getLockRank(&finlock))
}

func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
    if gcphase != _GCoff {
        // Currently we assume that the finalizer queue won't
        // grow during marking so we don't have to rescan it
        // during mark termination. If we ever need to lift
        // this assumption, we can do it by adding the
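
The guard enforces an invariant: the finalizer queue must not grow while marking, so mark termination never has to rescan it. A toy sketch of the same phase-guarded rejection, with hypothetical names:

package main

var marking bool // stands in for gcphase != _GCoff

var finalizerQueue []func()

// queueFinalizer rejects growth while marking, mirroring the assumption in
// the comment above: a queue frozen during marking never needs rescanning.
func queueFinalizer(fn func()) {
    if marking {
        panic("queueFinalizer during marking")
    }
    finalizerQueue = append(finalizerQueue, fn)
}

func main() {
    queueFinalizer(func() {}) // ok: marking is not active
}
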
src/runtime/mgcpacer.go
//
//go:systemstack
func gcControllerCommit() {
    assertWorldStoppedOrLockHeld(&mheap_.lock)

    gcController.commit(isSweepDone())

    // Update mark pacing.
    if gcphase != _GCoff {
        gcController.revise()
    }

    // TODO(mknyszek): This isn't really accurate any longer because the heap
    // goal is computed dynamically. Still useful to snapshot, but not as useful.
    trace := traceAcquire()
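
The point of the phase check is that revise recomputes derived mark pacing, which only has a consumer while marking is in progress. A toy sketch of the commit/revise split; the names and formulas below are placeholders, not the real pacer math:

package main

// pacer is a toy stand-in for gcController; the formulas are placeholders.
type pacer struct {
    heapGoal   int64 // committed once per cycle
    assistRate int64 // derived pacing, only meaningful while marking
}

// commit recomputes the cycle's inputs; the derived mark pacing is revised
// only when marking is in progress to consume it, as in gcControllerCommit.
func (p *pacer) commit(liveBytes int64, marking bool) {
    p.heapGoal = liveBytes * 2 // e.g. GOGC=100 doubles the live heap
    if marking {
        p.revise()
    }
}

func (p *pacer) revise() {
    p.assistRate = p.heapGoal / 8 // placeholder recomputation
}

func main() {
    var p pacer
    p.commit(1<<20, true)
}
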
src/runtime/malloc.go
// Allocate black during GC.
// All slots hold nil so no scanning is needed.
// This may be racing with GC so do it atomically if there can be
// a race marking the bit.
if gcphase != _GCoff {
    gcmarknewobject(span, uintptr(x))
}

if raceenabled {
    racemalloc(x, size)
}

if msanenabled {
    msanmalloc(x, size)
}

if asanenabled {
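
"Allocate black" means objects created while marking is active are born marked, so the in-progress cycle can never free them. A toy model of that rule, with hypothetical names (obj, marking, newObj):

package main

import "fmt"

// obj and marking are toy stand-ins; marking models gcphase != _GCoff.
type obj struct{ marked bool }

var marking bool

// newObj allocates black while a cycle is active: an object born marked can
// never be freed by the in-progress cycle, which is the point of the
// gcmarknewobject call above.
func newObj() *obj {
    o := &obj{}
    if marking {
        o.marked = true
    }
    return o
}

func main() {
    marking = true
    fmt.Println(newObj().marked) // true: born during the cycle, born marked
}
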
src/runtime/proc.go
    globrunqputhead(pp.runnext.ptr())
    pp.runnext = 0
}

// Move all timers to the local P.
getg().m.p.ptr().timers.take(&pp.timers)

// Flush p's write barrier buffer.
if gcphase != _GCoff {
    wbBufFlush1(pp)
    pp.gcw.dispose()
}
for i := range pp.sudogbuf {
    pp.sudogbuf[i] = nil
}
pp.sudogcache = pp.sudogbuf[:0]
pp.pinnerCache = nil
for j := range pp.deferpoolbuf {
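
Before a P is destroyed mid-cycle, its locally buffered GC work (the write barrier buffer and gcw) is flushed so nothing queued on it is lost. A minimal sketch of that teardown rule under hypothetical names (worker, destroy):

package main

import "sync"

// worker models a per-P structure with locally buffered work; hypothetical.
type worker struct {
    buf []int
}

var (
    globalMu sync.Mutex
    globalQ  []int
)

// destroy flushes the worker's local buffer into the shared queue when a
// cycle is active, mirroring the wbBufFlush1/gcw.dispose calls above: work
// buffered on a structure being torn down must not be lost mid-cycle.
func (w *worker) destroy(cycleActive bool) {
    if cycleActive {
        globalMu.Lock()
        globalQ = append(globalQ, w.buf...)
        globalMu.Unlock()
    }
    w.buf = nil
}

func main() {
    w := &worker{buf: []int{1, 2, 3}}
    w.destroy(true)
}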