- Sort Score
- Results per page: 10
- Languages All
Results 31 - 40 of 288 for curg (0.04 sec)
-
src/runtime/sys_linux_ppc64x.s
CMP $0, R6 BEQ sigtramp // g.m == nil MOVW m_ncgo(R6), R7 CMPW $0, R7 BEQ sigtramp // g.m.ncgo = 0 MOVD m_curg(R6), R7 CMP $0, R7 BEQ sigtramp // g.m.curg == nil MOVD g_syscallsp(R7), R7 CMP $0, R7 BEQ sigtramp // g.m.curg.syscallsp == 0 MOVD m_cgoCallers(R6), R7 // R7 is the fifth arg in C calling convention. CMP $0, R7 BEQ sigtramp // g.m.cgoCallers == nil MOVW m_cgoCallersUse(R6), R8
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 18:17:17 UTC 2024 - 18.1K bytes - Viewed (0) -
src/runtime/sys_darwin_amd64.s
JZ sigtramp // g.m == nil MOVL m_ncgo(AX), CX TESTL CX, CX JZ sigtramp // g.m.ncgo == 0 MOVQ m_curg(AX), CX TESTQ CX, CX JZ sigtramp // g.m.curg == nil MOVQ g_syscallsp(CX), CX TESTQ CX, CX JZ sigtramp // g.m.curg.syscallsp == 0 MOVQ m_cgoCallers(AX), R8 TESTQ R8, R8 JZ sigtramp // g.m.cgoCallers == nil MOVL m_cgoCallersUse(AX), CX TESTL CX, CX
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Aug 03 16:07:59 UTC 2023 - 19.7K bytes - Viewed (0) -
src/runtime/asm_wasm.s
// func switchToCrashStack0(fn func()) TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-8 MOVD fn+0(FP), CTXT // context register MOVD g_m(g), R2 // curm // set g to gcrash MOVD $runtime·gcrash(SB), g // g = &gcrash MOVD R2, g_m(g) // g.m = curm MOVD g, m_g0(R2) // curm.g0 = g // switch to crashstack I64Load (g_stack+stack_hi)(g) I64Const $(-4*8) I64Add I32WrapI64 Set SP
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Nov 20 21:26:51 UTC 2023 - 11.8K bytes - Viewed (0) -
src/runtime/mbitmap.go
// for that memory to get freed. KeepAlive(ep) return } // stack if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi { found := false var u unwinder for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() { if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp { found = true break } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 00:18:55 UTC 2024 - 60K bytes - Viewed (0) -
src/runtime/mgcwork.go
lock(&work.wbufSpans.lock) if gcphase != _GCoff || work.wbufSpans.free.isEmpty() { unlock(&work.wbufSpans.lock) return false } systemstack(func() { gp := getg().m.curg for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ { span := work.wbufSpans.free.first if span == nil { break } work.wbufSpans.free.remove(span)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 12.9K bytes - Viewed (0) -
src/runtime/mgcmark.go
// If this is a self-scan, put the user G in // _Gwaiting to prevent self-deadlock. It may // already be in _Gwaiting if this is a mark // worker or we're in mark termination. userG := getg().m.curg selfScan := gp == userG && readgstatus(userG) == _Grunning if selfScan { casGToWaitingForGC(userG, _Grunning, waitReasonGarbageCollectionScan) } // TODO: suspendG blocks (and spins) until gp
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 18 21:25:11 UTC 2024 - 52.5K bytes - Viewed (0) -
src/internal/trace/trace_test.go
// Examine the execution tracer's view of the CPU profile samples. Filter it // to only include samples from the single test goroutine. Use the goroutine // ID that was recorded in the events: that should reflect getg().m.curg, // same as the profiler's labels (even when the M is using its g0 stack). totalTraceSamples := 0 traceSamples := 0 traceStacks := make(map[string]int) r, err := trace.NewReader(bytes.NewReader(tb))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 18.5K bytes - Viewed (0) -
src/runtime/race_ppc64le.s
// See racecallback for command codes. TEXT runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0 // Handle command raceGetProcCmd (0) here. // First, code below assumes that we are on curg, while raceGetProcCmd // can be executed on g0. Second, it is called frequently, so will // benefit from this fast path. MOVD $0, R0 // clear R0 since we came from C code CMP R3, $0 BNE rest
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:37:29 UTC 2024 - 17K bytes - Viewed (0) -
src/runtime/trace.go
if gp.trace.statusWasTraced(gen) { return } // Scribble down information about this goroutine. ug := untracedG{gp: gp, mid: -1} systemstack(func() { me := getg().m.curg // We don't have to handle this G status transition because we // already eliminated ourselves from consideration above. casGToWaitingForGC(me, _Grunning, waitReasonTraceGoroutineStatus)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 21:17:41 UTC 2024 - 37.1K bytes - Viewed (0) -
src/runtime/sema.go
semacquire1(addr, false, 0, 0, waitReasonSemacquire) } func semacquire1(addr *uint32, lifo bool, profile semaProfileFlags, skipframes int, reason waitReason) { gp := getg() if gp != gp.m.curg { throw("semacquire not on the G stack") } // Easy case. if cansemacquire(addr) { return } // Harder case: // increment waiter count
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 19K bytes - Viewed (0)