Results 1 - 10 of 19 for raceacquireg (0.31 sec)
src/runtime/chan.go
    // operations like close().
    return unsafe.Pointer(&c.buf)
}

func racesync(c *hchan, sg *sudog) {
    racerelease(chanbuf(c, 0))
    raceacquireg(sg.g, chanbuf(c, 0))
    racereleaseg(sg.g, chanbuf(c, 0))
    raceacquire(chanbuf(c, 0))
}

// Notify the race detector of a send or receive involving buffer entry idx
// and a channel c or its communicating partner sg.
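The racesync calls above describe to the race detector the synchronization the Go memory model already guarantees for buffered channels: a send acts as a release on the buffer slot and the corresponding receive as an acquire. A minimal user-level sketch of the edge being modelled (variable names are hypothetical, not taken from the runtime):

    package main

    import "fmt"

    func main() {
        var msg string          // shared data written before the send
        ch := make(chan int, 1) // buffered, so the sender never blocks here

        go func() {
            msg = "published before send" // this write happens before the send below
            ch <- 1                       // send: the "release" on the buffer slot
        }()

        <-ch             // receive: the "acquire" on the same buffer slot
        fmt.Println(msg) // safe; `go run -race` reports no race for this access
    }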
src/runtime/race.go
// In terms of the C memory model (C11 §5.1.2.4, §7.17.3),
// RaceAcquire is equivalent to atomic_load(memory_order_acquire).
//
//go:nosplit
func RaceAcquire(addr unsafe.Pointer) {
    raceacquire(addr)
}

// RaceRelease performs a release operation on addr that
// can synchronize with a later RaceAcquire on addr.
//
// In terms of the C memory model, RaceRelease is equivalent to
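These exported hooks let code that synchronizes through a mechanism the detector cannot see (cgo, shared memory, and the like) describe its happens-before edges and avoid false reports: a RaceRelease on an address synchronizes with a later RaceAcquire on the same address. A hedged sketch of the pairing; race.go is built only under the race build tag, so a file calling these hooks would normally carry the same constraint, and the handoff names below are hypothetical:

    //go:build race

    package handoff

    import (
        "runtime"
        "unsafe"
    )

    var (
        payload int // data handed from producer to consumer
        hsync   int // dummy variable; only its address is used for annotation
    )

    // publish writes the payload, then performs a release on &hsync so that a
    // later RaceAcquire on &hsync observes a happens-before edge from the write.
    // The actual cross-goroutine signal is assumed to travel through a mechanism
    // the race detector cannot observe (for example, a cgo call).
    func publish(v int) {
        payload = v
        runtime.RaceRelease(unsafe.Pointer(&hsync))
        // ... signal the consumer via the uninstrumented mechanism ...
    }

    // consume performs the matching acquire before reading the payload.
    func consume() int {
        // ... wait for the signal via the uninstrumented mechanism ...
        runtime.RaceAcquire(unsafe.Pointer(&hsync))
        return payload
    }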
src/runtime/trace.go
            tl := traceAcquire()
            if !pp.trace.statusWasTraced(tl.gen) {
                tl.writer().writeProcStatusForP(pp, false).end()
            }
            traceRelease(tl)
        })
        semrelease(&worldsema)
    }

    // Block until the trace reader has finished processing the last generation.
    semacquire(&trace.doneSema[gen%2])
    if raceenabled {
        raceacquire(unsafe.Pointer(&trace.doneSema[gen%2]))
    }
src/runtime/traceruntime.go
// not inlineable, and we want traceAcquire to be inlineable for
// low overhead when the trace is disabled.
const debugTraceReentrancy = false

// traceAcquire prepares this M for writing one or more trace events.
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func traceAcquire() traceLocker {
    if !traceEnabled() {
        return traceLocker{}
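The design spelled out in these comments, keeping traceAcquire tiny and inlineable so a disabled tracer costs almost nothing and having callers check the returned locker before emitting events, is the pattern visible in the other excerpts (tracecpu.go, proc.go, mgcsweep.go). A standalone sketch of that shape, with hypothetical names, assuming an atomic enabled flag:

    package tracedemo

    import "sync/atomic"

    var enabled atomic.Bool // flipped by whatever starts and stops tracing

    // locker is the handle returned by acquire; ok reports whether tracing
    // was enabled at acquire time.
    type locker struct{ ok bool }

    // acquire mirrors the traceAcquire shape: the body is small enough to
    // inline, and when tracing is disabled it returns a zero locker without
    // doing any work, so the disabled fast path stays cheap.
    func acquire() locker {
        if !enabled.Load() {
            return locker{}
        }
        // a real implementation would prepare per-thread buffers here
        return locker{ok: true}
    }

    func release(l locker) {
        if !l.ok {
            return
        }
        // a real implementation would flush or unpin buffers here
    }

    // emitEvent shows the caller-side pattern from the runtime excerpts:
    // acquire, check ok, write events, release.
    func emitEvent() {
        tl := acquire()
        if !tl.ok {
            return // tracing disabled
        }
        // ... write one or more events ...
        release(tl)
    }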
src/runtime/tracecpu.go
        // Like the runtime/pprof package, even if that bug didn't exist
        // we would still want to do a goroutine-level sleep in between
        // reads to avoid frequent wakeups.
        trace.cpuSleep.sleep(100_000_000)

        tl := traceAcquire()
        if !tl.ok() {
            // Tracing disabled.
            break
        }
        keepGoing := traceReadCPU(tl.gen)
        traceRelease(tl)
        if !keepGoing {
            break
        }
    }
    done <- struct{}{}
}()
src/runtime/cgocall.go
    // used again if callback decide to make syscall.
    winsyscall := mp.winsyscall

    exitsyscall()
    getg().m.winsyscall = winsyscall

    // Note that raceacquire must be called only after exitsyscall has
    // wired this M to a P.
    if raceenabled {
        raceacquire(unsafe.Pointer(&racecgosync))
    }

    // From the garbage collector's perspective, time can move
    // backwards in the sequence above. If there's a callback into
src/runtime/proc.go
    // might call into the tracer, and the tracer is non-reentrant.
    trace := traceAcquire()
    if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
        if trace.ok() {
            // It's important that we traceRelease before we call handoffp, which may also traceAcquire.
            trace.ProcSteal(p2, false)
            traceRelease(trace)
        }
        p2.syscalltick++
        handoffp(p2)
src/runtime/traceallocfree.go
    w.varint(uint64(pageSize))
    w.varint(uint64(minHeapAlign))
    w.varint(uint64(fixedStack))

    // Finish writing the batch.
    w.flush().end()

    // Start tracing.
    trace := traceAcquire()
    if !trace.ok() {
        throw("traceSnapshotMemory: tracing is not enabled")
    }

    // Write out all the heap spans and heap objects.
    for _, s := range mheap_.allspans {
        if s.state.get() == mSpanDead {
src/runtime/syscall_windows.go
}

func cbsLock() {
    lock(&cbs.lock)
    // compileCallback is used by goenvs prior to completion of schedinit.
    // raceacquire involves a racecallback to get the proc, which is not
    // safe prior to scheduler initialization. Thus avoid instrumentation
    // until then.
    if raceenabled && mainStarted {
        raceacquire(unsafe.Pointer(&cbs.lock))
    }
}

func cbsUnlock() {
    if raceenabled && mainStarted {
src/runtime/mgcsweep.go
print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n") throw("mspan.sweep: bad span state") } trace := traceAcquire() if trace.ok() { trace.GCSweepSpan(s.npages * _PageSize) traceRelease(trace) } mheap_.pagesSwept.Add(int64(s.npages)) spc := s.spanclass size := s.elemsize