- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 12 for vmkcall (0.15 sec)
-
pilot/pkg/networking/core/peer_authentication_simulation_test.go
Call: mkCall(8000, simulation.Plaintext), Result: simulation.Result{ClusterMatched: "InboundPassthroughCluster"}, }, { Name: "mtls on port 8000", Call: mkCall(8000, simulation.MTLS), Result: simulation.Result{ClusterMatched: "InboundPassthroughCluster"}, }, { Name: "plaintext port 9000", Call: mkCall(9000, simulation.Plaintext),
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Thu Jun 13 01:56:28 UTC 2024 - 16.2K bytes - Viewed (0) -
src/runtime/stubs.go
func getg() *g // mcall switches from the g to the g0 stack and invokes fn(g), // where g is the goroutine that made the call. // mcall saves g's current PC/SP in g->sched so that it can be restored later. // It is up to fn to arrange for that later execution, typically by recording // g in a data structure, causing something to call ready(g) later.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 20.2K bytes - Viewed (0) -
src/runtime/preempt.go
// // asyncPreempt is implemented in assembly. func asyncPreempt() //go:nosplit func asyncPreempt2() { gp := getg() gp.asyncSafePoint = true if gp.preemptStop { mcall(preemptPark) } else { mcall(gopreempt_m) } gp.asyncSafePoint = false } // asyncPreemptStack is the bytes of stack space required to inject an // asyncPreempt call. var asyncPreemptStack = ^uintptr(0)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 15:41:45 UTC 2024 - 15.1K bytes - Viewed (0) -
src/runtime/tracestack.go
pcBuf[0] = uintptr(skip) if getg() == gp { nstk += fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf[1:]) } else if gp != nil { // Three cases: // // (1) We're called on the g0 stack through mcall(fn) or systemstack(fn). To // behave like gcallers above, we start unwinding from sched.bp, which // points to the caller frame of the leaf frame on g's stack. The return
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 21 14:38:56 UTC 2024 - 11K bytes - Viewed (0) -
src/runtime/race_amd64.s
// C->Go callback thunk that allows to call runtime·racesymbolize from C code. // Direct Go->C race call has only switched SP, finish g->g0 switch by setting correct g. // The overall effect of Go->C->Go call chain is similar to that of mcall. // RARG0 contains command code. RARG1 contains command-specific context. // See racecallback for command codes. TEXT runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0-0 // Handle command raceGetProcCmd (0) here.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:37:29 UTC 2024 - 15.1K bytes - Viewed (0) -
src/runtime/race_arm64.s
// C->Go callback thunk that allows to call runtime·racesymbolize from C code. // Direct Go->C race call has only switched SP, finish g->g0 switch by setting correct g. // The overall effect of Go->C->Go call chain is similar to that of mcall. // R0 contains command code. R1 contains command-specific context. // See racecallback for command codes. TEXT runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0 // Handle command raceGetProcCmd (0) here.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:37:29 UTC 2024 - 15.5K bytes - Viewed (0) -
src/runtime/race_ppc64le.s
// C->Go callback thunk that allows to call runtime·racesymbolize from C code. // Direct Go->C race call has only switched SP, finish g->g0 switch by setting correct g. // The overall effect of Go->C->Go call chain is similar to that of mcall. // RARG0 contains command code. RARG1 contains command-specific context. // See racecallback for command codes. TEXT runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0 // Handle command raceGetProcCmd (0) here.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:37:29 UTC 2024 - 17K bytes - Viewed (0) -
src/runtime/asm_loong64.s
MOVV gobuf_ret(R4), R19 MOVV gobuf_ctxt(R4), REGCTXT MOVV R0, gobuf_sp(R4) MOVV R0, gobuf_ret(R4) MOVV R0, gobuf_lr(R4) MOVV R0, gobuf_ctxt(R4) MOVV gobuf_pc(R4), R6 JMP (R6) // void mcall(fn func(*g)) // Switch to m->g0's stack, call fn(g). // Fn must never return. It should gogo(&g->sched) // to keep running g. TEXT runtime·mcall<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-8 MOVV R4, REGCTXT
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 13 15:04:25 UTC 2024 - 26.5K bytes - Viewed (0) -
src/runtime/metrics_test.go
for i, stk := range acceptStacks { if goexperiment.StaticLockRanking { if !slices.ContainsFunc(stk, func(s string) bool { return s == "runtime.systemstack" || s == "runtime.mcall" || s == "runtime.mstart" }) { // stk is a call stack that is still on the user stack when // it calls runtime.unlock. Add the extra function that
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:52:17 UTC 2024 - 45K bytes - Viewed (0) -
src/runtime/panic.go
func (p *_panic) nextDefer() (func(), bool) { gp := getg() if !p.deferreturn { if gp._panic != p { throw("bad panic stack") } if p.recovered { mcall(recovery) // does not return throw("recovery failed") } } // The assembler adjusts p.argp in wrapper functions that shouldn't // be visible to recover(), so we need to restore it each iteration.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 43.8K bytes - Viewed (0)