- Sort: Score
- Results per page: 10
- Languages: All
Results 11 - 18 of 18 for systemstack (0.13 sec)
-
src/runtime/os_dragonfly.go
ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0) if ret >= 0 { return uintptr(out) } return 0 } //go:nosplit func futexsleep(addr *uint32, val uint32, ns int64) { systemstack(func() { futexsleep1(addr, val, ns) }) } func futexsleep1(addr *uint32, val uint32, ns int64) { var timeout int32 if ns >= 0 { // The timeout is specified in microseconds - ensure that we
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Dec 05 20:34:30 UTC 2023 - 7.1K bytes - Viewed (0) -
src/runtime/sys_openbsd2.go
tp unsafe.Pointer }{_CLOCK_MONOTONIC, unsafe.Pointer(&ts)} if errno := libcCall(unsafe.Pointer(abi.FuncPCABI0(clock_gettime_trampoline)), unsafe.Pointer(&args)); errno < 0 { // Avoid growing the nosplit stack. systemstack(func() { println("runtime: errno", -errno) throw("clock_gettime failed") }) } return ts.tv_sec*1e9 + int64(ts.tv_nsec) } func clock_gettime_trampoline() //go:nosplit
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 8.7K bytes - Viewed (0) -
src/runtime/cgocheck.go
// Do this check last because it is more expensive and rarely true. // If it is false the expense doesn't matter since we are crashing. if inPersistentAlloc(uintptr(unsafe.Pointer(dst))) { return } systemstack(func() { println("write of unpinned Go pointer", hex(uintptr(src)), "to non-Go memory", hex(uintptr(unsafe.Pointer(dst)))) throw(cgoWriteBarrierFail) }) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 09 04:07:57 UTC 2024 - 7.6K bytes - Viewed (0) -
src/runtime/tracetime.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 21:17:41 UTC 2024 - 3.3K bytes - Viewed (0) -
src/runtime/os_openbsd.go
//go:nosplit func semawakeup(mp *m) { atomic.Xadd(&mp.waitsemacount, 1) ret := thrwakeup(uintptr(unsafe.Pointer(&mp.waitsemacount)), 1) if ret != 0 && ret != _ESRCH { // semawakeup can be called on signal stack. systemstack(func() { print("thrwakeup addr=", &mp.waitsemacount, " sem=", mp.waitsemacount, " ret=", ret, "\n") }) } } func osinit() { ncpu = getncpu() physPageSize = getPageSize() }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 6.2K bytes - Viewed (0) -
src/runtime/mwbbuf.go
// and this way we can allow write barriers in the // panic path. getg().m.p.ptr().wbBuf.discard() return } // Switch to the system stack so we don't have to worry about // safe points. systemstack(func() { wbBufFlush1(getg().m.p.ptr()) }) } // wbBufFlush1 flushes p's write barrier buffer to the GC work queue. // // This must not have write barriers because it is part of the write
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 8.1K bytes - Viewed (0) -
src/runtime/coro.go
// newcoro creates a new coro containing a // goroutine blocked waiting to run f // and returns that coro. func newcoro(f func(*coro)) *coro { c := new(coro) c.f = f pc := getcallerpc() gp := getg() systemstack(func() { mp := gp.m start := corostart startfv := *(**funcval)(unsafe.Pointer(&start)) gp = newproc1(startfv, gp, pc, true, waitReasonCoroutine) // Scribble down locked thread state if needed and/or donate
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:09:18 UTC 2024 - 7.4K bytes - Viewed (0) -
src/runtime/tracecpu.go
// has completed and that there are no more writers to it. func traceCPUFlush(gen uintptr) { // Flush any remaining trace buffers containing CPU samples. if buf := trace.cpuBuf[gen%2]; buf != nil { systemstack(func() { lock(&trace.lock) traceBufFlush(buf, gen) unlock(&trace.lock) trace.cpuBuf[gen%2] = nil }) } } // traceCPUSample writes a CPU profile sample stack to the execution tracer's
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 15 17:03:35 UTC 2024 - 8.7K bytes - Viewed (0)