- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 7 of 7 for m_curg (0.1 sec)
-
src/runtime/time_linux_amd64.s
MOVQ m_vdsoSP(BX), DX MOVQ CX, 0(SP) MOVQ DX, 8(SP) LEAQ sec+0(FP), DX MOVQ -8(DX), CX // Sets CX to function return address. MOVQ CX, m_vdsoPC(BX) MOVQ DX, m_vdsoSP(BX) CMPQ R14, m_curg(BX) // Only switch if on curg. JNE noswitch MOVQ m_g0(BX), DX MOVQ (g_sched+gobuf_sp)(DX), SP // Set SP to g0 stack noswitch: SUBQ $32, SP // Space for two time results ANDQ $~15, SP // Align for C code
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat Nov 06 10:24:44 UTC 2021 - 2K bytes - Viewed (0) -
src/internal/trace/testdata/testprog/cpu-profile.go
} // Examine the CPU profiler's view. Filter it to only include samples from // the single test goroutine. Use labels to execute that filter: they should // apply to all work done while that goroutine is getg().m.curg, and they // should apply to no other goroutines. pprofStacks := make(map[string]int) for _, s := range prof.Sample { if s.Label["tracing"] != nil { var fns []string var leaf string
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 3.8K bytes - Viewed (0) -
src/runtime/testdata/testprogcgo/pprof_callback.go
} func CgoPprofCallback() { // Issue 50936 was a crash in the SIGPROF handler when the signal // arrived during the exitsyscall following a cgocall(back) in dropg or // execute, when updating mp.curg. // // These are reachable only when exitsyscall finds no P available. Thus // we make C calls from significantly more Gs than there are available // Ps. Lots of runnable work combined with >20us spent in callGo makes
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Jun 08 15:44:05 UTC 2022 - 1.8K bytes - Viewed (0) -
src/runtime/coro.go
// GoSwitch expects that the current G is running and that we haven't // switched yet for correct status emission. if trace.ok() { trace.GoSwitch(gnext, exit) } // Start running next, without heavy scheduling machinery. // Set mp.curg and gnext.m and then update scheduling state // directly if possible. setGNoWB(&mp.curg, gnext) setMNoWB(&gnext.m, mp)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:09:18 UTC 2024 - 7.4K bytes - Viewed (0) -
src/runtime/tracestatus.go
// _Gsyscall is the tracer's signal that the P its bound to is also in a syscall, // so we need to emit a status that matches. See #64318. if w.mp.p.ptr() == pp && w.mp.curg != nil && readgstatus(w.mp.curg)&^_Gscan == _Gsyscall { status = traceProcSyscall } case _Psyscall: status = traceProcSyscall default: throw("attempt to trace invalid or unsupported P status") }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 15 17:03:35 UTC 2024 - 7.2K bytes - Viewed (0) -
src/runtime/debugcall.go
// function call with return PC pc. If not, it returns a string // explaining why. // //go:nosplit func debugCallCheck(pc uintptr) string { // No user calls from the system stack. if getg() != getg().m.curg { return debugCallSystemStack } if sp := getcallersp(); !(getg().stack.lo < sp && sp <= getg().stack.hi) { // Fast syscalls (nanotime) and racecall switch to the // g0 stack without switching g. We can't safely make
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 05 20:50:21 UTC 2024 - 7.1K bytes - Viewed (0) -
src/runtime/traceevent.go
w := tl.writer() if pp := tl.mp.p.ptr(); pp != nil && !pp.trace.statusWasTraced(tl.gen) && pp.trace.acquireStatus(tl.gen) { w = w.writeProcStatus(uint64(pp.id), procStatus, pp.trace.inSweep) } if gp := tl.mp.curg; gp != nil && !gp.trace.statusWasTraced(tl.gen) && gp.trace.acquireStatus(tl.gen) { w = w.writeGoStatus(uint64(gp.goid), int64(tl.mp.procid), goStatus, gp.inMarkAssist, 0 /* no stack */) } return traceEventWriter{w} }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:47:01 UTC 2024 - 9.2K bytes - Viewed (0)