- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 12 for systemstack (0.23 sec)
-
src/runtime/mgc.go
work.mode = mode now := nanotime() work.tSweepTerm = now var stw worldStop systemstack(func() { stw = stopTheWorldWithSema(stwGCSweepTerm) }) // Accumulate fine-grained stopping time. work.cpuStats.accumulateGCPauseTime(stw.stoppingCPUTime, 1) // Finish sweep before we start concurrent scan. systemstack(func() { finishsweep_m() })
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 62K bytes - Viewed (0) -
src/runtime/mprof.go
// the profiler locks. This reduces potential contention and chances of // deadlocks. Since the object must be alive during the call to // mProf_Malloc, it's fine to do this non-atomically. systemstack(func() { setprofilebucket(p, b) }) } // Called when freeing a profiled block. func mProf_Free(b *bucket, size uintptr) { index := (mProfCycle.read() + 1) % uint32(len(memRecord{}.future))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:57:37 UTC 2024 - 53.3K bytes - Viewed (0) -
src/runtime/mgcmark.go
// // See the comment on enteredMarkAssistForTracing. enteredMarkAssistForTracing = true } // Perform assist work systemstack(func() { gcAssistAlloc1(gp, scanWork) // The user stack may have moved, so this can't touch // anything on it until it returns from systemstack. }) completed := gp.param != nil gp.param = nil if completed { gcMarkDone() } if gp.gcAssistBytes < 0 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 18 21:25:11 UTC 2024 - 52.5K bytes - Viewed (0) -
src/runtime/asm_amd64.s
RET // systemstack_switch is a dummy routine that systemstack leaves at the bottom // of the G stack. We need to distinguish the routine that // lives at the bottom of the G stack from the one that lives // at the top of the system stack because the one at the top of // the system stack terminates the stack walk (see topofstack()). // The frame layout needs to match systemstack // so that it can pretend to be systemstack_switch.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 11 20:38:24 UTC 2024 - 60.4K bytes - Viewed (0) -
src/runtime/traceback.go
case abi.FuncID_systemstack: // systemstack returns normally, so just follow the // stack transition. if usesLR && funcspdelta(f, frame.pc) == 0 { // We're at the function prologue and the stack // switch hasn't happened, or epilogue where we're // about to return. Just unwind normally. // Do this only on LR machines because on x86 // systemstack doesn't have an SP delta (the CALL
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 55.1K bytes - Viewed (0) -
src/runtime/mheap.go
// Don't do any operations that lock the heap on the G stack. // It might trigger stack growth, and the stack growth code needs // to be able to allocate heap. var s *mspan systemstack(func() { // To prevent excessive heap growth, before allocating n pages // we need to sweep and reclaim at least n pages. if !isSweepDone() { h.reclaim(npages) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0) -
src/runtime/pprof/pprof_test.go
for i := 0; i < 10; i++ { goroutineProf.WriteTo(io.Discard, 1) } }) } // TestLabelSystemstack makes sure CPU profiler samples of goroutines running // on systemstack include the correct pprof labels. See issue #48577 func TestLabelSystemstack(t *testing.T) { // Grab and re-set the initial value before continuing to ensure // GOGC doesn't actually change following the test.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 18:42:28 UTC 2024 - 68.8K bytes - Viewed (0) -
src/runtime/mgcpacer.go
c.gcPercent.Store(in) return out } //go:linkname setGCPercent runtime/debug.setGCPercent func setGCPercent(in int32) (out int32) { // Run on the system stack since we grab the heap lock. systemstack(func() { lock(&mheap_.lock) out = gcController.setGCPercent(in) gcControllerCommit() unlock(&mheap_.lock) }) // If we just disabled GC, wait for any concurrent GC mark to
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 55.4K bytes - Viewed (0) -
src/runtime/mgcscavenge.go
func (p *pageAlloc) scavenge(nbytes uintptr, shouldStop func() bool, force bool) uintptr { released := uintptr(0) for released < nbytes { ci, pageIdx := p.scav.index.find(force) if ci == 0 { break } systemstack(func() { released += p.scavengeOne(ci, pageIdx, nbytes-released) }) if shouldStop != nil && shouldStop() { break } } return released }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes - Viewed (0) -
src/runtime/malloc.go
// // Consider marking persistentalloc'd types not in heap by embedding // runtime/internal/sys.NotInHeap. func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer { var p *notInHeap systemstack(func() { p = persistentalloc1(size, align, sysStat) }) return unsafe.Pointer(p) } // Must run on system stack because stack growth can (re)invoke it. // See issue 9174. //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0)