Results 11 - 20 of 24 for traceRelease (1.15 sec)
src/runtime/mgcmark.go
		// acquire/release because this is part of the
		// goroutine's trace state, and it must be atomic
		// with respect to the tracer.
		gp.inMarkAssist = false
		traceRelease(trace)
	} else {
		// This state is tracked even if tracing isn't enabled.
		// It's only used by the new tracer.
		// See the comment on enteredMarkAssistForTracing.
		gp.inMarkAssist = false
	}
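The snippet shows one arm of the acquire/check/release bracket that recurs in mgcpacer.go, mgc.go, and malloc.go below: take the tracer with traceAcquire, emit events only when ok() reports that tracing was enabled at acquire time, then traceRelease on that same path. Those identifiers are unexported runtime internals, so what follows is only a minimal runnable sketch of the shape, with hypothetical stand-in types rather than the real API:

package main

import "fmt"

// traceLocker is a hypothetical stand-in for the runtime's unexported
// trace handle. ok reports whether tracing was enabled when acquired.
type traceLocker struct{ enabled bool }

func (t traceLocker) ok() bool { return t.enabled }

// Hypothetical acquire/release pair mirroring the bracket above: every
// acquire whose ok() returned true is paired with exactly one release.
func traceAcquire() traceLocker  { return traceLocker{enabled: true} }
func traceRelease(t traceLocker) {} // would unpin the tracer state here

func main() {
	trace := traceAcquire()
	if trace.ok() {
		fmt.Println("emit a trace event while the tracer is held")
		traceRelease(trace)
	}
}

Keeping the state change and the event emission inside one bracket is what makes them atomic with respect to the tracer starting or stopping, which is what the comment in the snippet is pointing at.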
src/runtime/mgcpacer.go
	}

	// Run the background mark worker.
	gp := node.gp.ptr()
	trace := traceAcquire()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.ok() {
		trace.GoUnpark(gp, 0)
		traceRelease(trace)
	}
	return gp, now
}

// resetLive sets up the controller state for the next mark phase after the end
// of the previous one. Must be called after endCycle and before commit, before
src/runtime/stack.go
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}
	if traceAllocFreeEnabled() {
		trace := traceTryAcquire()
		if trace.ok() {
			trace.GoroutineStackAlloc(uintptr(v), uintptr(n))
			traceRelease(trace)
		}
	}
	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if asanenabled {
		asanunpoison(v, uintptr(n))
	}
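Note that stack.go reaches for traceTryAcquire rather than traceAcquire. As the name suggests, the try variant can apparently decline (return a not-ok locker) even while tracing is enabled, which suits delicate contexts like stack allocation where a full acquire may not be safe. A self-contained sketch of that behavior, again with hypothetical stand-ins (traceLocker, stackGrowthInProgress, and the guard logic are all invented for illustration):

package main

import "fmt"

type traceLocker struct{ enabled bool }

func (t traceLocker) ok() bool { return t.enabled }

// tracingOn and stackGrowthInProgress are hypothetical globals standing
// in for runtime state the real code consults.
var tracingOn = true
var stackGrowthInProgress = true

// traceTryAcquire mirrors the non-blocking variant above: it may return
// a not-ok locker even while tracing is on, so callers in delicate
// contexts simply skip the event rather than risk a full acquire.
func traceTryAcquire() traceLocker {
	if stackGrowthInProgress {
		return traceLocker{enabled: false}
	}
	return traceLocker{enabled: tracingOn}
}

func traceRelease(t traceLocker) {} // hypothetical unpin

func main() {
	if trace := traceTryAcquire(); trace.ok() {
		fmt.Println("emit GoroutineStackAlloc event")
		traceRelease(trace)
	} else {
		fmt.Println("tracer unavailable; event dropped")
	}
}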
src/runtime/mgc.go
	// Update it under gcsema to avoid gctrace getting wrong values.
	work.userForced = trigger.kind == gcTriggerCycle

	trace := traceAcquire()
	if trace.ok() {
		trace.GCStart()
		traceRelease(trace)
	}

	// Check that all Ps have finished deferred mcache flushes.
	for _, p := range allp {
		if fg := p.mcache.flushGen.Load(); fg != mheap_.sweepgen {
src/runtime/malloc.go
		inittrace.bytes += uint64(fullSize)
	}

	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.HeapObjectAlloc(uintptr(x), typ)
			traceRelease(trace)
		}
	}
}

if assistG != nil {
	// Account for internal fragmentation in the assist
	// debt now that we know it.
	//
	// N.B. Use the full size because that's how the rest
src/runtime/race.go
// In terms of the C memory model, RaceRelease is equivalent to
// atomic_store(memory_order_release).
//
//go:nosplit
func RaceRelease(addr unsafe.Pointer) {
	racerelease(addr)
}

// RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
// relation with the preceding RaceRelease or RaceReleaseMerge on addr.
//
// In terms of the C memory model, RaceReleaseMerge is equivalent to
// atomic_exchange(memory_order_release).
//
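RaceAcquire and RaceRelease are the exported faces of these annotations. They exist only in -race builds and are advisory: they tell the detector about a happens-before edge it cannot infer on its own. A toy sketch of the call shape (run with go run -race; note the channel already synchronizes this program, so the Race calls here are purely illustrative):

//go:build race

package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

var data int
var token int32 // address used purely as a synchronization token

func main() {
	done := make(chan struct{})
	go func() {
		data = 42
		// Like atomic_store(memory_order_release) per the doc comment
		// above: everything before this is published on &token.
		runtime.RaceRelease(unsafe.Pointer(&token))
		close(done)
	}()
	<-done
	// Pairs with the release: the detector now treats the write to
	// data as happening before this read.
	runtime.RaceAcquire(unsafe.Pointer(&token))
	fmt.Println(data)
}

In real use the pair annotates synchronization the detector genuinely cannot see, such as ordering established through cgo or shared memory.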
src/internal/race/race.go
package race

import (
	"runtime"
	"unsafe"
)

const Enabled = true

func Acquire(addr unsafe.Pointer) {
	runtime.RaceAcquire(addr)
}

func Release(addr unsafe.Pointer) {
	runtime.RaceRelease(addr)
}

func ReleaseMerge(addr unsafe.Pointer) {
	runtime.RaceReleaseMerge(addr)
}

func Disable() {
	runtime.RaceDisable()
}

func Enable() {
	runtime.RaceEnable()
}
src/runtime/race0.go
func raceacquireg(gp *g, addr unsafe.Pointer)             { throw("race") }
func raceacquirectx(racectx uintptr, addr unsafe.Pointer) { throw("race") }
func racerelease(addr unsafe.Pointer)                     { throw("race") }
func racereleaseg(gp *g, addr unsafe.Pointer)             { throw("race") }
src/runtime/proflabel.go
// This would more properly use &getg().labels as the sync address,
// but we do the read in a signal handler and can't call the race runtime then.
//
// This uses racereleasemerge rather than just racerelease so
// the acquire in profBuf.read synchronizes with *all* prior
// setProfLabel operations, not just the most recent one. This
// is important because profBuf.read will observe different
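This comment sits in setProfLabel's machinery; the user-facing way to reach it is the label API in runtime/pprof. A minimal use of that API, for context on what the synchronization above protects:

package main

import (
	"context"
	"runtime/pprof"
)

func main() {
	// pprof.Do attaches the labels to the goroutine for the duration
	// of the callback; under the hood this reaches setProfLabel, the
	// function whose racereleasemerge the comment above describes.
	pprof.Do(context.Background(), pprof.Labels("worker", "1"), func(ctx context.Context) {
		// CPU profile samples taken in here carry worker=1.
	})
}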
src/runtime/chan.go
		unlock(&c.lock)
		panic(plainError("close of closed channel"))
	}

	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(closechan))
		racerelease(c.raceaddr())
	}

	c.closed = 1

	var glist gList

	// release all readers
	for {
		sg := c.recvq.dequeue()
		if sg == nil {
			break
		}
		if sg.elem != nil {
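The racewritepc/racerelease pair here is what backs the Go memory model's guarantee that a close happens before a receive that completes because the channel is closed. That is why patterns like the following are race-free and pass the detector:

package main

import "fmt"

var data int

func main() {
	c := make(chan struct{})
	go func() {
		data = 42 // written before the close...
		close(c)  // ...which performs racerelease(c.raceaddr()) above
	}()
	<-c               // receiving on the closed channel does the matching acquire
	fmt.Println(data) // guaranteed to observe 42
}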