- Sort: Score
- Results per page: 10
- Languages: All
Results 51 - 60 of 72 for systemstack (0.22 sec)
-
src/runtime/os_linux.go
if ret >= 0 { return } // I don't know that futex wakeup can return // EAGAIN or EINTR, but if it does, it would be // safe to loop and call futex again. systemstack(func() { print("futexwakeup addr=", addr, " returned ", ret, "\n") }) *(*int32)(unsafe.Pointer(uintptr(0x1006))) = 0x1006 } func getproccount() int32 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 25.9K bytes - Viewed (0) -
src/cmd/compile/internal/noder/noder.go
fallthrough // because of //go:cgo_unsafe_args default: verb := text if i := strings.Index(text, " "); i >= 0 { verb = verb[:i] } flag := pragmaFlag(verb) const runtimePragmas = ir.Systemstack | ir.Nowritebarrier | ir.Nowritebarrierrec | ir.Yeswritebarrierrec if !base.Flag.CompilingRuntime && flag&runtimePragmas != 0 { p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in runtime", verb)}) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Oct 11 20:40:57 UTC 2023 - 12.5K bytes - Viewed (0) -
src/runtime/traceback.go
case abi.FuncID_systemstack: // systemstack returns normally, so just follow the // stack transition. if usesLR && funcspdelta(f, frame.pc) == 0 { // We're at the function prologue and the stack // switch hasn't happened, or epilogue where we're // about to return. Just unwind normally. // Do this only on LR machines because on x86 // systemstack doesn't have an SP delta (the CALL
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 55.1K bytes - Viewed (0) -
src/runtime/HACKING.md
System and signal stacks cannot grow, but are large enough to execute runtime and cgo code (8K in a pure Go binary; system-allocated in a cgo binary). Runtime code often temporarily switches to the system stack using `systemstack`, `mcall`, or `asmcgocall` to perform tasks that must not be preempted, that must not grow the user stack, or that switch user goroutines. Code running on the system stack is implicitly
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 13.9K bytes - Viewed (0) -
src/runtime/heapdump.go
// peak stack depths and we risk blowing the system stack. // This is safe because the world is stopped, so we don't // need to worry about anyone shrinking and therefore moving // our stack. var m MemStats systemstack(func() { // Call readmemstats_m here instead of deeper in // writeheapdump_m because we might blow the system stack // otherwise. readmemstats_m(&m) writeheapdump_m(fd, &m) }) startTheWorld(stw)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 09 04:07:57 UTC 2024 - 17.6K bytes - Viewed (0) -
src/runtime/mheap.go
// Don't do any operations that lock the heap on the G stack. // It might trigger stack growth, and the stack growth code needs // to be able to allocate heap. var s *mspan systemstack(func() { // To prevent excessive heap growth, before allocating n pages // we need to sweep and reclaim at least n pages. if !isSweepDone() { h.reclaim(npages) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0) -
src/runtime/metrics.go
a.gcMiscSys = memstats.gcMiscSys.load() a.otherSys = memstats.other_sys.load() a.heapGoal = gcController.heapGoal() a.gcCyclesDone = uint64(memstats.numgc) a.gcCyclesForced = uint64(memstats.numforcedgc) systemstack(func() { lock(&mheap_.lock) a.mSpanSys = memstats.mspan_sys.load() a.mSpanInUse = uint64(mheap_.spanalloc.inuse) a.mCacheSys = memstats.mcache_sys.load() a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 08 21:03:13 UTC 2024 - 26K bytes - Viewed (0) -
src/runtime/time.go
// in blockTimerChan, but timerHeaped not being set means // it hasn't run t.maybeAdd yet; in that case, running the // timer ourselves now is fine.) if now := nanotime(); t.when <= now { systemstack(func() { t.unlockAndRun(now) // resets t.when }) t.lock() } } } // stop stops the timer t. It may be on some other P, so we can't
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 29 14:36:24 UTC 2024 - 37.5K bytes - Viewed (0) -
src/runtime/arena.go
if mp.mallocing != 0 { throw("malloc deadlock") } if mp.gsignal == getg() { throw("malloc during signal") } mp.mallocing = 1 // Allocate a new user arena. var span *mspan systemstack(func() { span = mheap_.allocUserArenaChunk() }) if span == nil { throw("out of memory") } x := unsafe.Pointer(span.base()) // Allocate black during GC.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:44:56 UTC 2024 - 37.9K bytes - Viewed (0) -
src/runtime/pprof/pprof_test.go
for i := 0; i < 10; i++ { goroutineProf.WriteTo(io.Discard, 1) } }) } // TestLabelSystemstack makes sure CPU profiler samples of goroutines running // on systemstack include the correct pprof labels. See issue #48577 func TestLabelSystemstack(t *testing.T) { // Grab and re-set the initial value before continuing to ensure // GOGC doesn't actually change following the test.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 18:42:28 UTC 2024 - 68.8K bytes - Viewed (0)