- Sort Score
- Result 10 results
- Languages All
Results 41 - 50 of 62 for Setg (0.04 sec)
-
src/runtime/trace.go
if raceenabled { // g0 doesn't have a race context. Borrow the user G's. if getg().racectx != 0 { throw("expected racectx == 0") } getg().racectx = getg().m.curg.racectx // (This defer should get open-coded, which is safe on // the system stack.) defer func() { getg().racectx = 0 }() } // This function must not allocate while holding trace.lock:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 21:17:41 UTC 2024 - 37.1K bytes - Viewed (0) -
src/runtime/syscall_windows.go
// calls back into Go. c := &getg().m.winsyscall c.fn = fn c.n = uintptr(len(args)) if c.n != 0 { c.args = uintptr(noescape(unsafe.Pointer(&args[0]))) } cgocall(asmstdcallAddr, unsafe.Pointer(c)) // cgocall may reschedule us on to a different M, // but it copies the return values into the new M's // so we can read them from there. c = &getg().m.winsyscall return c.r1, c.r2, c.err
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 20:12:46 UTC 2024 - 16.6K bytes - Viewed (0) -
src/runtime/cgocheck.go
} if !cgoIsGoPointer(src) { return } if cgoIsGoPointer(unsafe.Pointer(dst)) { return } // If we are running on the system stack then dst might be an // address on the stack, which is OK. gp := getg() if gp == gp.m.g0 || gp == gp.m.gsignal { return } // Allocating memory can write to various mfixalloc structs // that look like they are non-Go memory. if gp.m.mallocing != 0 { return }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 09 04:07:57 UTC 2024 - 7.6K bytes - Viewed (0) -
src/runtime/malloc.go
} else if size&1 == 0 { align = 2 } else { align = 1 } } return persistentalloc(size, align, &memstats.other_sys) } if inittrace.active && inittrace.id == getg().goid { // Init functions are executed sequentially in a single goroutine. inittrace.allocs += 1 } } // assistG is the G to charge for this allocation, or nil if
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0) -
src/runtime/mbitmap.go
// another stack. Either way, no need for barriers. // This will also catch if dst is in a freed span, // though that should never happen. return } buf := &getg().m.p.ptr().wbBuf // Double-check that the bitmaps generated in the two possible paths match. const doubleCheck = false if doubleCheck { doubleCheckTypePointersOfType(s, typ, dst, size) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 00:18:55 UTC 2024 - 60K bytes - Viewed (0) -
src/runtime/traceruntime.go
// buffer flushes are rare. Record the lock edge even if it doesn't happen // this time. lockRankMayTraceFlush() // Check if we're already locked. If so, return an invalid traceLocker. if getg().m.trace.seqlock.Load()%2 == 1 { return traceLocker{} } return traceAcquireEnabled() } // ok returns true if the traceLocker is valid (i.e. tracing is enabled). //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 25.7K bytes - Viewed (0) -
src/runtime/runtime.go
// writeErrData is the common parts of writeErr{,Str}. // //go:nosplit func writeErrData(data *byte, n int32) { write(2, unsafe.Pointer(data), n) // If crashing, print a copy to the SetCrashOutput fd. gp := getg() if gp != nil && gp.m.dying > 0 || gp == nil && panicking.Load() > 0 { if fd := crashFD.Load(); fd != ^uintptr(0) { write(fd, unsafe.Pointer(data), n) } } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:16:47 UTC 2024 - 9.9K bytes - Viewed (0) -
src/runtime/arena.go
} // Prevent preemption as we set up the space for a new object. // // Act like we're allocating. mp := acquirem() if mp.mallocing != 0 { throw("malloc deadlock") } if mp.gsignal == getg() { throw("malloc during signal") } mp.mallocing = 1 var ptr unsafe.Pointer if !typ.Pointers() { // Allocate pointer-less objects from the tail end of the chunk.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:44:56 UTC 2024 - 37.9K bytes - Viewed (0) -
src/runtime/mgcscavenge.go
// // Must be called from a regular goroutine that can allocate. func (s *scavengerState) init() { if s.g != nil { throw("scavenger state is already wired") } lockInit(&s.lock, lockRankScavenge) s.g = getg() s.timer = new(timer) f := func(s any, _ uintptr, _ int64) { s.(*scavengerState).wake() } s.timer.init(f, s) // input: fraction of CPU time actually used. // setpoint: ideal CPU fraction.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes - Viewed (0) -
src/runtime/chan.go
} c.qcount++ unlock(&c.lock) return true } if !block { unlock(&c.lock) return false } // Block on the channel. Some receiver will complete our operation for us. gp := getg() mysg := acquireSudog() mysg.releasetime = 0 if t0 != 0 { mysg.releasetime = -1 } // No stack splits between assigning elem and enqueuing mysg // on gp.waiting where copystack can find it.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:16:50 UTC 2024 - 25.9K bytes - Viewed (0)