Results 41 - 50 of 161 for mp (0.23 sec)
- src/runtime/preempt.go
        ready(gp, 0, true)
    }
}

// canPreemptM reports whether mp is in a state that is safe to preempt.
//
// It is nosplit because it has nosplit callers.
//
//go:nosplit
func canPreemptM(mp *m) bool {
    return mp.locks == 0 && mp.mallocing == 0 && mp.preemptoff == "" && mp.p.ptr().status == _Prunning
}

//go:generate go run mkpreempt.go
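
The check above is internal to the runtime (*m and _Prunning are not importable), but its shape transfers to ordinary code: a worker advertises a few independent "unsafe to interrupt" conditions, and a checker interrupts only when all of them are clear. A minimal sketch of that pattern in plain Go; every name here is invented for illustration and none of it is runtime API.

package main

import (
    "fmt"
    "sync/atomic"
)

// worker mirrors the shape of canPreemptM: several independent conditions
// must all be clear before it is safe to interrupt the worker.
type worker struct {
    locks      atomic.Int32 // >0 while holding a lock, like mp.locks
    allocating atomic.Bool  // like mp.mallocing
    running    atomic.Bool  // like the _Prunning status check
}

// canPreempt reports whether w is in a state that is safe to interrupt.
func (w *worker) canPreempt() bool {
    return w.locks.Load() == 0 && !w.allocating.Load() && w.running.Load()
}

func main() {
    var w worker
    w.running.Store(true)
    w.locks.Add(1)              // enter a critical section
    fmt.Println(w.canPreempt()) // false: a lock is held
    w.locks.Add(-1)
    fmt.Println(w.canPreempt()) // true
}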

- src/runtime/os_freebsd.go
//
//go:nowritebarrier
func newosproc(mp *m) {
    stk := unsafe.Pointer(mp.g0.stack.hi)
    if false {
        print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " thr_start=", abi.FuncPCABI0(thr_start), " id=", mp.id, " ostk=", &mp, "\n")
    }

    param := thrparam{
        start_func: abi.FuncPCABI0(thr_start),
        arg:        unsafe.Pointer(mp),
        stack_base: mp.g0.stack.lo,
        stack_size: uintptr(stk) - mp.g0.stack.lo,

- src/runtime/tracebuf.go
    return traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf}
}

// end writes the buffer back into the m.
func (w traceWriter) end() {
    if w.mp == nil {
        // Tolerate a nil mp. It makes code that creates traceWriters directly
        // less error-prone.
        return
    }
    w.mp.trace.buf[w.gen%2] = w.traceBuf
}

// ensure makes sure that at least maxSize bytes are available to write.
//
// Returns whether the buffer was flushed.
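
The nil check in end is a deliberate design choice called out by the comment: tolerating a zero-value writer means call sites never need a guard. A minimal sketch of the same idea, with invented writer and sink types standing in for the runtime's:

package main

import "fmt"

type sink struct{ data []byte }

// writer tolerates a nil destination so that a zero-value writer is safe
// to construct and end never needs a guard at the call site.
type writer struct {
    dst *sink
    buf []byte
}

func (w writer) end() {
    if w.dst == nil {
        // Tolerate a nil destination, mirroring the nil-mp check above.
        return
    }
    w.dst.data = append(w.dst.data, w.buf...)
}

func main() {
    writer{buf: []byte("dropped")}.end() // nil destination: a harmless no-op
    s := &sink{}
    writer{dst: s, buf: []byte("kept")}.end()
    fmt.Println(string(s.data)) // kept
}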

- src/runtime/rand.go
    mp := getg().m
    c := &mp.chacha8
    for {
        // Note: c.Next is marked nosplit,
        // so we don't need to use mp.locks
        // on the fast path, which is that the
        // first attempt succeeds.
        x, ok := c.Next()
        if ok {
            return x
        }
        mp.locks++ // hold m even though c.Refill may do stack split checks
        c.Refill()
        mp.locks--
    }
}
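
The loop above is a fast-path/slow-path split: Next is tried first, and only a failed attempt pays for Refill (bracketed by mp.locks++ and mp.locks-- so the m stays pinned across a call that may do stack split checks). A hedged sketch of the same split with an invented buffered source; plain Go has no counterpart to pinning an m, so that part appears only as a comment.

package main

import "fmt"

// bufSource hands out values from a buffer and refills in a slow path,
// mirroring the c.Next/c.Refill split above. All names are invented.
type bufSource struct {
    buf  []uint64
    next uint64 // stand-in state used to generate refill values
}

func (s *bufSource) Next() (uint64, bool) {
    if len(s.buf) == 0 {
        return 0, false // buffer exhausted: caller must refill
    }
    x := s.buf[0]
    s.buf = s.buf[1:]
    return x, true
}

func (s *bufSource) Refill() {
    for i := 0; i < 4; i++ {
        s.next++
        s.buf = append(s.buf, s.next)
    }
}

func (s *bufSource) Get() uint64 {
    for {
        // Fast path: the buffer already has a value.
        if x, ok := s.Next(); ok {
            return x
        }
        // Slow path: refill, then retry. The runtime brackets this with
        // mp.locks++ and mp.locks-- to keep the m pinned.
        s.Refill()
    }
}

func main() {
    var s bufSource
    fmt.Println(s.Get(), s.Get()) // 1 2
}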

- src/runtime/proc.go
    if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
        for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
            cgoOff++
        }
        n += copy(stk[:], mp.cgoCallers[:cgoOff])
        mp.cgoCallers[0] = 0
    }

    // Collect Go stack that leads to the cgo call.
    u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
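
The scan treats mp.cgoCallers as a sentinel-terminated buffer: entries are valid up to the first zero PC, and writing zero back into slot 0 hands the buffer back to the producer. A small sketch of that convention with ordinary slices; takeFrames is an invented helper, not runtime API.

package main

import "fmt"

// takeFrames copies the non-zero prefix of a sentinel-terminated PC buffer
// into dst, then clears the first slot to mark the buffer as consumed,
// mirroring the cgoCallers handling above.
func takeFrames(dst, src []uintptr) int {
    off := 0
    for off < len(src) && src[off] != 0 {
        off++
    }
    n := copy(dst, src[:off])
    if len(src) > 0 {
        src[0] = 0 // release the buffer for reuse
    }
    return n
}

func main() {
    src := []uintptr{0x401000, 0x402000, 0, 0} // two valid frames, zero-terminated
    dst := make([]uintptr, 8)
    n := takeFrames(dst, src)
    fmt.Println(n, dst[:n]) // 2 [4198400 4202496]
}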

- src/runtime/mprof.go
    first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
    for mp := first; mp != nil; mp = mp.alllink {
        n++
    }
    if n <= size {
        ok = true
        for mp := first; mp != nil; mp = mp.alllink {
            r := profilerecord.StackRecord{Stack: mp.createstack[:]}
            copyFn(r)
        }
    }
    return
}

//go:linkname pprof_threadCreateInternal
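
The public counterpart of this walk over allm is runtime.ThreadCreateProfile, which keeps the same two-pass contract: it always reports the record count, and ok is false when the supplied slice was too small. A short usage sketch; the +10 headroom is an arbitrary choice, not a documented requirement.

package main

import (
    "fmt"
    "runtime"
)

func main() {
    // First call with nil just to learn the record count, then retry with
    // a slice that has headroom in case more threads appear meanwhile.
    n, _ := runtime.ThreadCreateProfile(nil)
    p := make([]runtime.StackRecord, n+10)
    n, ok := runtime.ThreadCreateProfile(p)
    if ok {
        fmt.Printf("%d thread-creation records\n", n)
    }
}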

- pilot/pkg/leaderelection/k8sleaderelection/metrics.go
    onlyOnce sync.Once
}

func (f *leaderMetricsFactory) setProvider(mp MetricsProvider) {
    f.onlyOnce.Do(func() {
        f.metricsProvider = mp
    })
}

func (f *leaderMetricsFactory) newLeaderMetrics() leaderMetricsAdapter {
    mp := f.metricsProvider
    if mp == (noopMetricsProvider{}) {
        return noMetrics{}
    }
    return &defaultLeaderMetrics{
        leader: mp.NewLeaderMetric(),
    }
}
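
A sync.Once-guarded setter plus a no-op default is a compact way to make metrics optional while keeping provider injection race-free: whoever calls setProvider first wins, and everyone else silently keeps the no-op. A self-contained sketch of the pattern, with the interfaces simplified from the snippet:

package main

import (
    "fmt"
    "sync"
)

// GaugeMetric and MetricsProvider are simplified stand-ins for the
// interfaces behind the snippet above.
type GaugeMetric interface{ Set(v float64) }

type MetricsProvider interface{ NewLeaderMetric() GaugeMetric }

type noopGauge struct{}

func (noopGauge) Set(float64) {}

type noopProvider struct{}

func (noopProvider) NewLeaderMetric() GaugeMetric { return noopGauge{} }

type printGauge struct{}

func (printGauge) Set(v float64) { fmt.Println("leader gauge:", v) }

type printProvider struct{}

func (printProvider) NewLeaderMetric() GaugeMetric { return printGauge{} }

type metricsFactory struct {
    provider MetricsProvider
    once     sync.Once
}

// setProvider may be called any number of times from any goroutine; only
// the first call takes effect, which makes late injection race-free.
func (f *metricsFactory) setProvider(mp MetricsProvider) {
    f.once.Do(func() { f.provider = mp })
}

func main() {
    f := &metricsFactory{provider: noopProvider{}}
    f.setProvider(printProvider{})
    f.setProvider(noopProvider{}) // ignored: a provider was already set
    f.provider.NewLeaderMetric().Set(1)
}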

- src/runtime/os_netbsd_arm64.go
"unsafe" ) func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) { // Machine dependent mcontext initialisation for LWP. mc.__gregs[_REG_ELR] = uint64(abi.FuncPCABI0(lwp_tramp)) mc.__gregs[_REG_X31] = uint64(uintptr(stk)) mc.__gregs[_REG_X0] = uint64(uintptr(unsafe.Pointer(mp))) mc.__gregs[_REG_X1] = uint64(uintptr(unsafe.Pointer(mp.g0))) mc.__gregs[_REG_X2] = uint64(fn) } //go:nosplit

- src/runtime/tracestack.go
// generations moving forward. Prefer traceEventWriter.stack.
func traceStack(skip int, gp *g, gen uintptr) uint64 {
    var pcBuf [traceStackSize]uintptr

    // Figure out gp and mp for the backtrace.
    var mp *m
    if gp == nil {
        mp = getg().m
        gp = mp.curg
    }

    // Double-check that we own the stack we're about to trace.
    if debug.traceCheckStackOwnership != 0 && gp != nil {
        status := readgstatus(gp)

- src/runtime/os_netbsd_386.go
import (
    "internal/abi"
    "unsafe"
)

func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) {
    // Machine dependent mcontext initialisation for LWP.
    mc.__gregs[_REG_EIP] = uint32(abi.FuncPCABI0(lwp_tramp))
    mc.__gregs[_REG_UESP] = uint32(uintptr(stk))
    mc.__gregs[_REG_EBX] = uint32(uintptr(unsafe.Pointer(mp)))
    mc.__gregs[_REG_EDX] = uint32(uintptr(unsafe.Pointer(gp)))
    mc.__gregs[_REG_ESI] = uint32(fn)