- Sort by: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 24 for racereleaseg (0.79 sec)
-
src/runtime/tracecpu.go
// reads to avoid frequent wakeups. trace.cpuSleep.sleep(100_000_000) tl := traceAcquire() if !tl.ok() { // Tracing disabled. break } keepGoing := traceReadCPU(tl.gen) traceRelease(tl) if !keepGoing { break } } done <- struct{}{} }() trace.cpuLogDone = done } // traceStopReadCPU blocks until the trace CPU reading goroutine exits. //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 15 17:03:35 UTC 2024 - 8.7K bytes - Viewed (0) -
src/runtime/mgcsweep.go
throw("mspan.sweep: bad span state") } trace := traceAcquire() if trace.ok() { trace.GCSweepSpan(s.npages * _PageSize) traceRelease(trace) } mheap_.pagesSwept.Add(int64(s.npages)) spc := s.spanclass size := s.elemsize // The allocBits indicate which unmarked objects don't need to be
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:52:18 UTC 2024 - 32.9K bytes - Viewed (0) -
src/runtime/traceallocfree.go
} abits.advance() } } // Write out all the goroutine stacks. forEachGRace(func(gp *g) { trace.GoroutineStackExists(gp.stack.lo, gp.stack.hi-gp.stack.lo) }) traceRelease(trace) } func traceSpanTypeAndClass(s *mspan) traceArg { if s.state.get() == mSpanInUse { return traceArg(s.spanclass) << 1 } return traceArg(1) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 20:32:51 UTC 2024 - 5.9K bytes - Viewed (0) -
src/runtime/coro.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:09:18 UTC 2024 - 7.4K bytes - Viewed (0) -
src/runtime/mheap.go
// sweeping, so we can read h.sweepArenas, and so // traceGCSweepStart/Done pair on the P. mp := acquirem() trace := traceAcquire() if trace.ok() { trace.GCSweepStart() traceRelease(trace) } arenas := h.sweepArenas locked := false for npage > 0 { // Pull from accumulated credit first. if credit := h.reclaimCredit.Load(); credit > 0 { take := credit
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0) -
src/runtime/mgcmark.go
// acquire/release because this is part of the // goroutine's trace state, and it must be atomic // with respect to the tracer. gp.inMarkAssist = false traceRelease(trace) } else { // This state is tracked even if tracing isn't enabled. // It's only used by the new tracer. // See the comment on enteredMarkAssistForTracing. gp.inMarkAssist = false }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 18 21:25:11 UTC 2024 - 52.5K bytes - Viewed (0) -
src/runtime/type.go
} func reflectOffsLock() { lock(&reflectOffs.lock) if raceenabled { raceacquire(unsafe.Pointer(&reflectOffs.lock)) } } func reflectOffsUnlock() { if raceenabled { racerelease(unsafe.Pointer(&reflectOffs.lock)) } unlock(&reflectOffs.lock) } // resolveNameOff should be an internal detail, // but widely used packages access it using linkname.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:17:26 UTC 2024 - 12.7K bytes - Viewed (0) -
src/runtime/mgcpacer.go
} // Run the background mark worker. gp := node.gp.ptr() trace := traceAcquire() casgstatus(gp, _Gwaiting, _Grunnable) if trace.ok() { trace.GoUnpark(gp, 0) traceRelease(trace) } return gp, now } // resetLive sets up the controller state for the next mark phase after the end // of the previous one. Must be called after endCycle and before commit, before
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 55.4K bytes - Viewed (0) -
src/runtime/syscall_windows.go
// until then. if raceenabled && mainStarted { raceacquire(unsafe.Pointer(&cbs.lock)) } } func cbsUnlock() { if raceenabled && mainStarted { racerelease(unsafe.Pointer(&cbs.lock)) } unlock(&cbs.lock) } // winCallback records information about a registered Go callback. type winCallback struct { fn *funcval // Go function
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 20:12:46 UTC 2024 - 16.6K bytes - Viewed (0) -
src/runtime/stack.go
s.elemsize = uintptr(n) } v = unsafe.Pointer(s.base()) } if traceAllocFreeEnabled() { trace := traceTryAcquire() if trace.ok() { trace.GoroutineStackAlloc(uintptr(v), uintptr(n)) traceRelease(trace) } } if raceenabled { racemalloc(v, uintptr(n)) } if msanenabled { msanmalloc(v, uintptr(n)) } if asanenabled { asanunpoison(v, uintptr(n)) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 41.1K bytes - Viewed (0)