- Sort Score
- Results per page: 10
- Languages All
Results 21 - 30 of 59 for Getg (0.11 sec)
-
src/runtime/cgocall.go
throw("cgocall unavailable") } if fn == nil { throw("cgocall nil") } if raceenabled { racereleasemerge(unsafe.Pointer(&racecgosync)) } mp := getg().m mp.ncgocall++ // Reset traceback. mp.cgoCallers[0] = 0 // Announce we are entering a system call // so that the scheduler knows to create another // M to run goroutines while we are in the
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:16:47 UTC 2024 - 24.2K bytes - Viewed (0) -
src/runtime/mgcsweep.go
// // If the scavenger isn't already awake, wake it up. There's // definitely work for it to do at this point. scavenger.wake() nextMarkBitArenaEpoch() } func bgsweep(c chan int) { sweep.g = getg() lockInit(&sweep.lock, lockRankSweep) lock(&sweep.lock) sweep.parked = true c <- 1 goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceBlockGCSweep, 1) for {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:52:18 UTC 2024 - 32.9K bytes - Viewed (0) -
src/runtime/mgcwork.go
// A gcWork provides the interface to produce and consume work for the // garbage collector. // // A gcWork can be used on the stack as follows: // // (preemption must be disabled) // gcw := &getg().m.p.ptr().gcw // .. call gcw.put() to produce and gcw.tryGet() to consume .. // // It's important that any use of gcWork during the mark phase prevent // the garbage collector from transitioning to mark termination since
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 12.9K bytes - Viewed (0) -
src/runtime/preempt.go
// directly schedule the waiter. The context switch is unavoidable in // the signal case. // //go:systemstack func suspendG(gp *g) suspendGState { if mp := getg().m; mp.curg != nil && readgstatus(mp.curg) == _Grunning { // Since we're on the system stack of this M, the user // G is stuck at an unsafe point. If another goroutine // were to try to preempt m.curg, it could deadlock.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 15:41:45 UTC 2024 - 15.1K bytes - Viewed (0) -
src/runtime/trace.go
if raceenabled { // g0 doesn't have a race context. Borrow the user G's. if getg().racectx != 0 { throw("expected racectx == 0") } getg().racectx = getg().m.curg.racectx // (This defer should get open-coded, which is safe on // the system stack.) defer func() { getg().racectx = 0 }() } // This function must not allocate while holding trace.lock:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 21:17:41 UTC 2024 - 37.1K bytes - Viewed (0) -
src/runtime/syscall_windows.go
// calls back into Go. c := &getg().m.winsyscall c.fn = fn c.n = uintptr(len(args)) if c.n != 0 { c.args = uintptr(noescape(unsafe.Pointer(&args[0]))) } cgocall(asmstdcallAddr, unsafe.Pointer(c)) // cgocall may reschedule us on to a different M, // but it copies the return values into the new M's // so we can read them from there. c = &getg().m.winsyscall return c.r1, c.r2, c.err
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 20:12:46 UTC 2024 - 16.6K bytes - Viewed (0) -
src/cmd/compile/internal/typecheck/func.go
} if t.NumResults() == 1 { n.SetType(l.Type().Result(0).Type) if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.ONAME { if sym := n.Fun.(*ir.Name).Sym(); types.RuntimeSymName(sym) == "getg" { // Emit code for runtime.getg() directly instead of calling function. // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Mar 06 15:23:18 UTC 2024 - 21.1K bytes - Viewed (0) -
src/runtime/stubs.go
//go:linkname add //go:nosplit func add(p unsafe.Pointer, x uintptr) unsafe.Pointer { return unsafe.Pointer(uintptr(p) + x) } // getg returns the pointer to the current g. // The compiler rewrites calls to this function into instructions // that fetch the g directly (from TLS or from the dedicated register). func getg() *g // mcall switches from the g to the g0 stack and invokes fn(g), // where g is the goroutine that made the call.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 20.2K bytes - Viewed (0) -
src/runtime/traceruntime.go
// buffer flushes are rare. Record the lock edge even if it doesn't happen // this time. lockRankMayTraceFlush() // Check if we're already locked. If so, return an invalid traceLocker. if getg().m.trace.seqlock.Load()%2 == 1 { return traceLocker{} } return traceAcquireEnabled() } // ok returns true if the traceLocker is valid (i.e. tracing is enabled). //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 25.7K bytes - Viewed (0) -
src/runtime/runtime.go
// writeErrData is the common parts of writeErr{,Str}. // //go:nosplit func writeErrData(data *byte, n int32) { write(2, unsafe.Pointer(data), n) // If crashing, print a copy to the SetCrashOutput fd. gp := getg() if gp != nil && gp.m.dying > 0 || gp == nil && panicking.Load() > 0 { if fd := crashFD.Load(); fd != ^uintptr(0) { write(fd, unsafe.Pointer(data), n) } } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:16:47 UTC 2024 - 9.9K bytes - Viewed (0)