- Sort by: Score
- Results per page: 10
- Languages All
Results 61 - 70 of 159 for getPkg (0.26 sec)
-
src/runtime/print.go
func printlock() { mp := getg().m mp.locks++ // do not reschedule between printlock++ and lock(&debuglock). mp.printlock++ if mp.printlock == 1 { lock(&debuglock) } mp.locks-- // now we know debuglock is held and holding up mp.locks for us. } func printunlock() { mp := getg().m mp.printlock-- if mp.printlock == 0 { unlock(&debuglock) } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jan 20 03:27:26 UTC 2023 - 5.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
preceding_op)) { if (auto q_op = llvm::dyn_cast_or_null<quantfork::QuantizeCastOp>( dq_op.getArg().getDefiningOp())) { Operation* q_op_input = q_op.getArg().getDefiningOp(); is_weight_constant = q_op_input && q_op_input->hasTrait<OpTrait::ConstantLike>(); } } if (!is_weight_constant) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 16.4K bytes - Viewed (0) -
src/runtime/export_test.go
} return } func GetNextArenaHint() uintptr { return mheap_.arenaHints.addr } type G = g type Sudog = sudog func Getg() *G { return getg() } func Goid() uint64 { return getg().goid } func GIsWaitingOnMutex(gp *G) bool { return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait() } var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:50:53 UTC 2024 - 46.1K bytes - Viewed (0) -
src/runtime/lockrank_off.go
} func unlockWithRank(l *mutex) { unlock2(l) } // This function may be called in nosplit context and thus must be nosplit. // //go:nosplit func releaseLockRankAndM(rank lockRank) { releasem(getg().m) } func lockWithRankMayAcquire(l *mutex, rank lockRank) { } //go:nosplit func assertLockHeld(l *mutex) { } //go:nosplit func assertRankHeld(r lockRank) { } //go:nosplit
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 22 14:29:04 UTC 2024 - 1.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/passes/rewrite_quantized_io.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 08 01:19:25 UTC 2023 - 4.5K bytes - Viewed (0) -
src/runtime/os_plan9.go
var buf [_ERRMAX]byte if !atomic.Cas(&exiting, 0, 1) { return } getg().m.locks++ n := copy(buf[:], goexits) n = copy(buf[n:], gostringnocopy(status)) pid := getpid() for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink { if mp.procid != 0 && mp.procid != pid { postnote(mp.procid, buf[:]) } } getg().m.locks-- } var procdir = []byte("/proc/")
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 15:41:45 UTC 2024 - 10.3K bytes - Viewed (0) -
src/runtime/rand.go
// //go:nosplit //go:linkname rand func rand() uint64 { // Note: We avoid acquirem here so that in the fast path // there is just a getg, an inlined c.Next, and a return. // The performance difference on a 16-core AMD is // 3.7ns/call this way versus 4.3ns/call with acquirem (+16%). mp := getg().m c := &mp.chacha8 for { // Note: c.Next is marked nosplit, // so we don't need to use mp.locks
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 31 14:32:47 UTC 2024 - 8K bytes - Viewed (0) -
src/runtime/coro.go
// and then calls coroexit to remove the extra concurrency. func corostart() { gp := getg() c := gp.coroarg gp.coroarg = nil defer coroexit(c) c.f(c) } // coroexit is like coroswitch but closes the coro // and exits the current goroutine func coroexit(c *coro) { gp := getg() gp.coroarg = c gp.coroexit = true mcall(coroswitch_m) } //go:linkname coroswitch
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:09:18 UTC 2024 - 7.4K bytes - Viewed (0) -
src/runtime/HACKING.md
so their memory remains type stable. As a result, the runtime can avoid write barriers in the depths of the scheduler. `getg()` and `getg().m.curg` ---------------------------- To get the current user `g`, use `getg().m.curg`. `getg()` alone returns the current `g`, but when executing on the system or signal stacks, this will return the current M's "g0" or
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 13.9K bytes - Viewed (0) -
src/runtime/debug.go
// // Ensure mayMoreStackPreempt can be called for all ABIs. // //go:nosplit //go:linkname mayMoreStackPreempt func mayMoreStackPreempt() { // Don't do anything on the g0 or gsignal stack. gp := getg() if gp == gp.m.g0 || gp == gp.m.gsignal { return } // Force a preemption, unless the stack is already poisoned. if gp.stackguard0 < stackPoisonMin { gp.stackguard0 = stackPreempt } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 11 20:38:24 UTC 2024 - 4.2K bytes - Viewed (0)