Results 71 - 80 of 609 for getg (2.99 sec)
src/runtime/stubs.go
//go:linkname add
//go:nosplit
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
    return unsafe.Pointer(uintptr(p) + x)
}

// getg returns the pointer to the current g.
// The compiler rewrites calls to this function into instructions
// that fetch the g directly (from TLS or from the dedicated register).
func getg() *g

// mcall switches from the g to the g0 stack and invokes fn(g),
// where g is the goroutine that made the call.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 20.2K bytes
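getg is a compiler intrinsic and cannot be called outside the runtime, but the add helper has a supported user-level counterpart: unsafe.Add (Go 1.17+). A minimal sketch of the same byte-offset pointer arithmetic:

package main

import (
    "fmt"
    "unsafe"
)

func main() {
    xs := [4]int32{10, 20, 30, 40}
    p := unsafe.Pointer(&xs[0])
    // Equivalent of the runtime's add(p, x): advance a pointer by a byte offset.
    // unsafe.Add is the supported spelling outside the runtime.
    q := unsafe.Add(p, 2*unsafe.Sizeof(xs[0]))
    fmt.Println(*(*int32)(q)) // 30
}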
src/runtime/malloc.go
    } else if size&1 == 0 {
        align = 2
    } else {
        align = 1
    }
}
return persistentalloc(size, align, &memstats.other_sys)
}

if inittrace.active && inittrace.id == getg().goid {
    // Init functions are executed sequentially in a single goroutine.
    inittrace.allocs += 1
}
}

// assistG is the G to charge for this allocation, or nil if
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes
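The inittrace counter incremented above feeds the GODEBUG=inittrace=1 output, which reports each package's init clock time, bytes allocated, and allocation count. A minimal way to observe it (the net/http import is only there to pull in a dependency tree with non-trivial inits):

// Run with: GODEBUG=inittrace=1 go run main.go
// Each stderr line reports one package's init: clock time, heap bytes,
// and the allocation count bumped by the snippet above.
package main

import _ "net/http"

func main() {}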
src/runtime/traceruntime.go
// buffer flushes are rare. Record the lock edge even if it doesn't happen
// this time.
lockRankMayTraceFlush()

// Check if we're already locked. If so, return an invalid traceLocker.
if getg().m.trace.seqlock.Load()%2 == 1 {
    return traceLocker{}
}
return traceAcquireEnabled()
}

// ok returns true if the traceLocker is valid (i.e. tracing is enabled).
//
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 25.7K bytes
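The parity test above is the usual seqlock convention: a writer bumps the counter to odd on entry and back to even on exit, so an odd value means "already inside a write". A generic sketch of that convention, not the runtime's implementation:

package main

import (
    "fmt"
    "sync/atomic"
)

// seq follows the seqlock convention the snippet checks against:
// odd means a writer (here: a trace flush) is in progress.
var seq atomic.Uint64

func beginWrite() { seq.Add(1) } // even -> odd: writer active
func endWrite()   { seq.Add(1) } // odd -> even: writer done

// tryAcquire mirrors the parity check above: refuse to nest
// if the sequence counter says we are already inside a write.
func tryAcquire() bool {
    return seq.Load()%2 == 0
}

func main() {
    fmt.Println(tryAcquire()) // true
    beginWrite()
    fmt.Println(tryAcquire()) // false: already "locked"
    endWrite()
    fmt.Println(tryAcquire()) // true again
}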
src/runtime/runtime.go
// from nanotime that we can use (some platforms have a really coarse system time granularity).
// We require some amount of time to pass to ensure that the conversion rate is fairly accurate
// in aggregate. But because we compute this rate lazily, there's a pretty good chance a decent
// amount of time has passed by the time we get here.
//
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:16:47 UTC 2024 - 9.9K bytes
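The comment describes lazy rate estimation between two clocks: sample both early, then convert only after enough time has passed for the ratio to be accurate in aggregate. A rough sketch of the idea, using a hypothetical coarse ticks() clock as a stand-in for the platform clock:

package main

import (
    "fmt"
    "time"
)

// Sample both clocks once at startup; compute the conversion rate
// only when first needed, so the elapsed interval is long enough to
// swamp the coarse clock's granularity.
var (
    startNano  = time.Now()
    startTicks = ticks()
)

func ticks() int64 { return time.Now().UnixMicro() } // stand-in coarse clock

func ratio() float64 {
    dt := time.Since(startNano).Nanoseconds()
    dk := ticks() - startTicks
    return float64(dk) / float64(dt) // ticks per nanosecond, in aggregate
}

func main() {
    time.Sleep(50 * time.Millisecond) // let time pass before converting
    fmt.Printf("~%.6f ticks/ns\n", ratio())
}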
src/runtime/netpoll_solaris.go
    libc_port_associate,
    libc_port_dissociate,
    libc_port_getn,
    libc_port_alert libcFunc

    netpollWakeSig atomic.Uint32 // used to avoid duplicate calls of netpollBreak
)

func errno() int32 {
    return *getg().m.perrno
}

func port_create() int32 {
    return int32(sysvicall0(&libc_port_create))
}

func port_associate(port, source int32, object uintptr, events uint32, user uintptr) int32 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 11.2K bytes
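The errno() accessor above reads the per-M errno slot directly; portable user code sees the same value surfaced as a syscall.Errno inside returned errors. For example:

package main

import (
    "errors"
    "fmt"
    "os"
    "syscall"
)

func main() {
    // The runtime reads errno straight out of *getg().m.perrno; user
    // code gets the same number wrapped as a syscall.Errno.
    _, err := os.Open("/definitely/not/a/real/path")
    var errno syscall.Errno
    if errors.As(err, &errno) {
        fmt.Printf("errno %d: %v\n", int(errno), errno) // ENOENT on Unix-like systems
    }
}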
src/runtime/arena.go
}

// Prevent preemption as we set up the space for a new object.
//
// Act like we're allocating.
mp := acquirem()
if mp.mallocing != 0 {
    throw("malloc deadlock")
}
if mp.gsignal == getg() {
    throw("malloc during signal")
}
mp.mallocing = 1

var ptr unsafe.Pointer
if !typ.Pointers() {
    // Allocate pointer-less objects from the tail end of the chunk.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:44:56 UTC 2024 - 37.9K bytes
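The acquirem/mallocing dance is a reentrancy guard: pin the current M, refuse nested entry, mark busy, and unmark when done. A user-level sketch of the same guard shape; the names and the panic-on-reentry policy here are illustrative, not the runtime's:

package main

import (
    "fmt"
    "sync/atomic"
)

// allocator sketches the guard pattern above: a busy flag plays the
// role of m.mallocing, and a nested call is treated as a fatal bug
// rather than a lock to wait on.
type allocator struct {
    busy atomic.Int32
    buf  []byte
}

func (a *allocator) alloc(n int) []byte {
    if !a.busy.CompareAndSwap(0, 1) {
        panic("alloc deadlock: reentrant call") // analogue of throw("malloc deadlock")
    }
    defer a.busy.Store(0)

    // ... set up the space for the new object ...
    old := len(a.buf)
    a.buf = append(a.buf, make([]byte, n)...)
    return a.buf[old:]
}

func main() {
    var a allocator
    fmt.Println(len(a.alloc(16))) // 16
}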
src/runtime/mgcscavenge.go
lock(&s.lock)
if getg() != s.g {
    throw("tried to sleep scavenger from another goroutine")
}

if worked < minScavWorkTime {
    // This means there wasn't enough work to actually fill up minScavWorkTime.
    // That's fine; we shouldn't try to do anything with this information
    // because it's going to result in a short enough sleep request that things
    // will get messy. Just assume we did at least this much work.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes
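The branch clamps unreliably short work measurements before the sleep request is computed. A sketch of that clamping, assuming a made-up utilization target rather than the scavenger's real parameters:

package main

import (
    "fmt"
    "time"
)

const minWorkTime = 1 * time.Millisecond

// sleepFor picks a sleep so that worked/(worked+sleep) ~= utilization,
// clamping intervals too short to measure reliably, exactly as the
// scavenger does with minScavWorkTime.
func sleepFor(worked time.Duration, utilization float64) time.Duration {
    if worked < minWorkTime {
        // Too little work to trust; just assume we did at least this much.
        worked = minWorkTime
    }
    return time.Duration(float64(worked) * (1 - utilization) / utilization)
}

func main() {
    fmt.Println(sleepFor(100*time.Microsecond, 0.01)) // clamped: 99ms
    fmt.Println(sleepFor(5*time.Millisecond, 0.01))   // 495ms
}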
src/runtime/chan.go
}
c.qcount++
unlock(&c.lock)
return true
}

if !block {
    unlock(&c.lock)
    return false
}

// Block on the channel. Some receiver will complete our operation for us.
gp := getg()
mysg := acquireSudog()
mysg.releasetime = 0
if t0 != 0 {
    mysg.releasetime = -1
}
// No stack splits between assigning elem and enqueuing mysg
// on gp.waiting where copystack can find it.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:16:50 UTC 2024 - 25.9K bytes
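This is the blocking slow path of channel send: the sender parks itself on the channel's wait queue and a receiver later completes the operation on its behalf. The observable behavior is easy to demonstrate with an unbuffered channel:

package main

import (
    "fmt"
    "time"
)

func main() {
    ch := make(chan int) // unbuffered: a send takes the blocking path above
    start := time.Now()

    go func() {
        time.Sleep(50 * time.Millisecond)
        fmt.Println("received:", <-ch) // the receiver completes the sender's operation
    }()

    ch <- 42 // parks this goroutine on the channel's sendq until the receiver arrives
    fmt.Println("send unblocked after", time.Since(start).Round(time.Millisecond))
    time.Sleep(10 * time.Millisecond) // let the receiver's print flush before exiting
}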
src/runtime/mstats.go
// is covered by idleTime, what we're left with is time spent in _Prunning and _Psyscall,
// the latter of which is fine because the P will either go idle or get used for something
// else via sysmon. Meanwhile if we subtract GC time from whatever's left, we get non-GC
// _Prunning time. Note that this still leaves time spent in sweeping and in the scheduler,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 08 21:03:13 UTC 2024 - 34.2K bytes
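The CPU-time decomposition this comment describes is exposed to user code as the /cpu/classes/ metrics in runtime/metrics (Go 1.20+), with the same caveats: they are estimates, not precise per-P timings. For example:

package main

import (
    "fmt"
    "runtime/metrics"
)

func main() {
    names := []string{
        "/cpu/classes/total:cpu-seconds",
        "/cpu/classes/idle:cpu-seconds",
        "/cpu/classes/gc/total:cpu-seconds",
        "/cpu/classes/user:cpu-seconds",
    }
    samples := make([]metrics.Sample, len(names))
    for i, n := range names {
        samples[i].Name = n
    }
    metrics.Read(samples)
    for _, s := range samples {
        // cpu-seconds metrics are float64; guard anyway in case a
        // name is unsupported on an older toolchain.
        if s.Value.Kind() == metrics.KindFloat64 {
            fmt.Printf("%-36s %.6f s\n", s.Name, s.Value.Float64())
        }
    }
}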
src/runtime/mheap.go
type mSpanStateBox struct {
    s atomic.Uint8
}

// It is nosplit to match get, below.
//go:nosplit
func (b *mSpanStateBox) set(s mSpanState) {
    b.s.Store(uint8(s))
}

// It is nosplit because it's called indirectly by typedmemclr,
// which must not be preempted.
//go:nosplit
func (b *mSpanStateBox) get() mSpanState {
    return mSpanState(b.s.Load())
}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes
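The box pattern here wraps a one-byte enum in an atomic so that every access goes through typed, atomic accessors and the conversions live in one place. A user-level sketch; sync/atomic has no Uint8 (the runtime uses its internal one), so this uses Uint32, and the names are illustrative:

package main

import (
    "fmt"
    "sync/atomic"
)

type spanState uint8

const (
    stateDead spanState = iota
    stateInUse
    stateManual
)

// spanStateBox mirrors mSpanStateBox: the enum is stored behind
// typed get/set so every load and store is atomic.
type spanStateBox struct {
    s atomic.Uint32 // sync/atomic has no Uint8; the runtime uses its internal one
}

func (b *spanStateBox) set(s spanState) { b.s.Store(uint32(s)) }
func (b *spanStateBox) get() spanState  { return spanState(b.s.Load()) }

func main() {
    var b spanStateBox
    b.set(stateInUse)
    fmt.Println(b.get() == stateInUse) // true
}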