- Sort by: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 324 for getZ (0.08 sec)
-
src/cmd/compile/internal/test/inl_test.go
"(*mspan).base", "(*mspan).markBitsForBase", "(*mspan).markBitsForIndex", "(*mspan).writeUserArenaHeapBits", "(*muintptr).set", "(*puintptr).set", "(*wbBuf).get1", "(*wbBuf).get2", // Trace-related ones. "traceLocker.ok", "traceEnabled", }, "runtime/internal/sys": {}, "runtime/internal/math": { "MulUintptr", }, "bytes": {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 09 04:07:57 UTC 2024 - 10.7K bytes - Viewed (0) -
src/runtime/rand.go
// //go:nosplit //go:linkname rand func rand() uint64 { // Note: We avoid acquirem here so that in the fast path // there is just a getg, an inlined c.Next, and a return. // The performance difference on a 16-core AMD is // 3.7ns/call this way versus 4.3ns/call with acquirem (+16%). mp := getg().m c := &mp.chacha8 for { // Note: c.Next is marked nosplit, // so we don't need to use mp.locks
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 31 14:32:47 UTC 2024 - 8K bytes - Viewed (0) -
src/runtime/coro.go
// and then calls coroexit to remove the extra concurrency. func corostart() { gp := getg() c := gp.coroarg gp.coroarg = nil defer coroexit(c) c.f(c) } // coroexit is like coroswitch but closes the coro // and exits the current goroutine func coroexit(c *coro) { gp := getg() gp.coroarg = c gp.coroexit = true mcall(coroswitch_m) } //go:linkname coroswitch
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:09:18 UTC 2024 - 7.4K bytes - Viewed (0) -
src/runtime/debug.go
// // Ensure mayMoreStackPreempt can be called for all ABIs. // //go:nosplit //go:linkname mayMoreStackPreempt func mayMoreStackPreempt() { // Don't do anything on the g0 or gsignal stack. gp := getg() if gp == gp.m.g0 || gp == gp.m.gsignal { return } // Force a preemption, unless the stack is already poisoned. if gp.stackguard0 < stackPoisonMin { gp.stackguard0 = stackPreempt } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 11 20:38:24 UTC 2024 - 4.2K bytes - Viewed (0) -
src/runtime/proc.go
// - fortio.org/log // // Do not remove or change the type signature. // See go.dev/issue/67401. // //go:linkname getm func getm() uintptr { return uintptr(unsafe.Pointer(getg().m)) } var ( // Locking linked list of extra M's, via mp.schedlink. Must be accessed // only via lockextra/unlockextra. // // Can't be atomic.Pointer[m] because we use an invalid pointer as a
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 207.5K bytes - Viewed (0) -
src/runtime/tracestack.go
// symbolizer. pcBuf[0] = logicalStackSentinel if getg() == gp { nstk += callers(skip+1, pcBuf[1:]) } else if gp != nil { nstk += gcallers(gp, skip, pcBuf[1:]) } } else { // Fast path: Unwind using frame pointers. pcBuf[0] = uintptr(skip) if getg() == gp { nstk += fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf[1:]) } else if gp != nil { // Three cases:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 21 14:38:56 UTC 2024 - 11K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf.cc
auto l = mlir::dyn_cast<TypedValue<RankedTensorType>>(op.getX()); auto r = mlir::dyn_cast<TypedValue<RankedTensorType>>(op.getY()); if (!l || !r) return failure(); auto element_type = getElementTypeOrSelf(l.getType()); if (!element_type.isBF16()) return failure(); auto out_type = op.getZ().getType(); l = rewriter.create<ConvertOp>(op.getLoc(), l, rewriter.getF32Type());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 20:00:43 UTC 2024 - 291.8K bytes - Viewed (0) -
src/runtime/signal_unix.go
// sigpanic calls can be injected. // // The signal handler must not inject a call to sigpanic if // getg().throwsplit, since sigpanic may need to grow the stack. // // This is exported via linkname to assembly in runtime/cgo. // //go:linkname sigpanic func sigpanic() { gp := getg() if !canpanic() { throw("unexpected signal during runtime execution") } switch gp.sig { case _SIGBUS:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 10 16:04:54 UTC 2024 - 45K bytes - Viewed (0) -
src/runtime/panic.go
if goarch.IsWasm == 0 && stringslite.HasPrefix(funcname(findfunc(pc)), "runtime.") { // Note: wasm can't tail call, so we can't get the original caller's pc. throw(msg) } // TODO: is this redundant? How could we be in malloc // but not in the runtime? runtime/internal/*, maybe? gp := getg() if gp != nil && gp.m != nil && gp.m.mallocing != 0 { throw(msg) } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 43.8K bytes - Viewed (0) -
src/runtime/rwmutex.go
lock(&rw.rLock) if rw.readerPass > 0 { // Writer finished. rw.readerPass -= 1 unlock(&rw.rLock) } else { // Queue this reader to be woken by // the writer. m := getg().m m.schedlink = rw.readers rw.readers.set(m) unlock(&rw.rLock) notesleep(&m.park) noteclear(&m.park) } }) } } // runlock undoes a single rlock call on rw.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 22 14:29:04 UTC 2024 - 5K bytes - Viewed (0)