Results 21 - 30 of 37 for raceAcquire (0.21 sec)
src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go
        m := int(iovecs[i].Len)
        if m > n {
            m = n
        }
        n -= m
        if m > 0 {
            raceWriteRange(unsafe.Pointer(iovecs[i].Base), m)
        }
    }
    if err == nil {
        raceAcquire(unsafe.Pointer(&ioSync))
    }
}

func Writev(fd int, iovs [][]byte) (n int, err error) {
    iovecs := make([]Iovec, 0, minIovec)
    iovecs = appendBytes(iovecs, iovs)
    if raceenabled {
src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go
func ReadFile(fd Handle, p []byte, done *uint32, overlapped *Overlapped) error {
    err := readFile(fd, p, done, overlapped)
    if raceenabled {
        if *done > 0 {
            raceWriteRange(unsafe.Pointer(&p[0]), int(*done))
        }
        raceAcquire(unsafe.Pointer(&ioSync))
    }
    return err
}

func WriteFile(fd Handle, p []byte, done *uint32, overlapped *Overlapped) error {
    if raceenabled {
        raceReleaseMerge(unsafe.Pointer(&ioSync))
    }
src/cmd/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
}

func Read(fd int, p []byte) (n int, err error) {
    n, err = read(fd, p)
    if raceenabled {
        if n > 0 {
            raceWriteRange(unsafe.Pointer(&p[0]), n)
        }
        if err == nil {
            raceAcquire(unsafe.Pointer(&ioSync))
        }
    }
    return
}

func Write(fd int, p []byte) (n int, err error) {
    if raceenabled {
        raceReleaseMerge(unsafe.Pointer(&ioSync))
    }
    n, err = write(fd, p)
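All three wrappers above share one happens-before discipline: a successful read annotates the kernel-filled buffer with raceWriteRange and then raceAcquires the package-level ioSync token, while a write raceReleaseMerges the same token before data leaves the process. Below is a minimal self-contained sketch of that pattern; raceenabled, the race* hooks, ioSync, and rawRead/rawWrite are hypothetical stand-ins for unexported internals of golang.org/x/sys/unix, not its real API (when built with -race, the real hooks call into the runtime's race detector).

package main

import "unsafe"

// Hypothetical stubs so the sketch compiles on its own.
const raceenabled = false

var ioSync int64 // dummy token both sides synchronize on

func raceAcquire(addr unsafe.Pointer)           {}
func raceReleaseMerge(addr unsafe.Pointer)      {}
func raceWriteRange(addr unsafe.Pointer, n int) {}

func rawRead(fd int, p []byte) (int, error)  { return 0, nil } // hypothetical syscall stub
func rawWrite(fd int, p []byte) (int, error) { return len(p), nil }

// Read side: the kernel wrote into p, so report that to the race
// detector as a write, then acquire ioSync to order this read after
// any earlier raceReleaseMerge on the same token.
func Read(fd int, p []byte) (n int, err error) {
    n, err = rawRead(fd, p)
    if raceenabled {
        if n > 0 {
            raceWriteRange(unsafe.Pointer(&p[0]), n)
        }
        if err == nil {
            raceAcquire(unsafe.Pointer(&ioSync))
        }
    }
    return
}

// Write side: release ioSync first, so everything stored into p so far
// happens-before any reader that later acquires ioSync.
func Write(fd int, p []byte) (n int, err error) {
    if raceenabled {
        raceReleaseMerge(unsafe.Pointer(&ioSync))
    }
    return rawWrite(fd, p)
}

func main() {
    buf := make([]byte, 16)
    _, _ = Write(1, buf)
    _, _ = Read(0, buf)
}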
src/runtime/traceruntime.go
// not inlineable, and we want traceAcquire to be inlineable for
// low overhead when the trace is disabled.
const debugTraceReentrancy = false

// traceAcquire prepares this M for writing one or more trace events.
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func traceAcquire() traceLocker {
    if !traceEnabled() {
        return traceLocker{}
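This definition pairs with traceRelease, and every runtime call site in the remaining results follows the same acquire/check/release shape. A toy sketch of that discipline follows; traceLocker here is a simplified stand-in for the runtime-internal type (the real one carries the M and trace generation), not the actual runtime code.

package main

// Simplified stand-in for the runtime's traceLocker.
type traceLocker struct{ enabled bool }

func (tl traceLocker) ok() bool { return tl.enabled }

func traceEnabled() bool { return false } // assume tracing is off

// traceAcquire returns an invalid locker when tracing is disabled,
// so the fast path is a single branch.
func traceAcquire() traceLocker { return traceLocker{enabled: traceEnabled()} }

func traceRelease(tl traceLocker) {}

func main() {
    // The canonical call-site shape seen in debugcall.go, mcentral.go,
    // tracecpu.go, proc.go, and mgcsweep.go below.
    trace := traceAcquire()
    if trace.ok() {
        // Emit one or more trace events here, then release before
        // calling anything that might traceAcquire again: the tracer
        // is non-reentrant (see the proc.go result below).
        traceRelease(trace)
    }
}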
src/runtime/debugcall.go
    gp.schedlink.set(newg)
})

// Switch to the new goroutine.
mcall(func(gp *g) {
    // Get newg.
    newg := gp.schedlink.ptr()
    gp.schedlink = 0

    // Park the calling goroutine.
    trace := traceAcquire()
    if trace.ok() {
        // Trace the event before the transition. It may take a
        // stack trace, but we won't own the stack after the
        // transition anymore.
        trace.GoPark(traceBlockDebugCall, 1)
    }
src/runtime/mcentral.go
// Deduct credit for this span allocation and sweep if necessary.
spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
deductSweepCredit(spanBytes, 0)

traceDone := false
trace := traceAcquire()
if trace.ok() {
    trace.GCSweepStart()
    traceRelease(trace)
}

// If we sweep spanBudget spans without finding any free
// space, just allocate a fresh span. This limits the amount
src/runtime/tracecpu.go
        // Like the runtime/pprof package, even if that bug didn't exist
        // we would still want to do a goroutine-level sleep in between
        // reads to avoid frequent wakeups.
        trace.cpuSleep.sleep(100_000_000)

        tl := traceAcquire()
        if !tl.ok() {
            // Tracing disabled.
            break
        }
        keepGoing := traceReadCPU(tl.gen)
        traceRelease(tl)
        if !keepGoing {
            break
        }
    }
    done <- struct{}{}
}()
src/runtime/proc.go
// might call into the tracer, and the tracer is non-reentrant.
trace := traceAcquire()
if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
    if trace.ok() {
        // It's important that we traceRelease before we call handoffp, which may also traceAcquire.
        trace.ProcSteal(p2, false)
        traceRelease(trace)
    }
    p2.syscalltick++
    handoffp(p2)
src/runtime/traceallocfree.go
w.varint(uint64(pageSize))
w.varint(uint64(minHeapAlign))
w.varint(uint64(fixedStack))

// Finish writing the batch.
w.flush().end()

// Start tracing.
trace := traceAcquire()
if !trace.ok() {
    throw("traceSnapshotMemory: tracing is not enabled")
}

// Write out all the heap spans and heap objects.
for _, s := range mheap_.allspans {
    if s.state.get() == mSpanDead {
src/runtime/mgcsweep.go
print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n") throw("mspan.sweep: bad span state") } trace := traceAcquire() if trace.ok() { trace.GCSweepSpan(s.npages * _PageSize) traceRelease(trace) } mheap_.pagesSwept.Add(int64(s.npages)) spc := s.spanclass size := s.elemsize