- Sort Score
- Results per page: 10
- Languages All
Results 91 - 100 of 226 for stack (0.03 sec)
-
src/syscall/syscall_linux.go
// // //go:uintptrkeepalive because the uintptr argument may be converted pointers // that need to be kept alive in the caller. // // //go:nosplit because stack copying does not account for uintptrkeepalive, so // the stack must not grow. Stack copying cannot blindly assume that all // uintptr arguments are pointers, because some values may look like pointers,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 20:12:46 UTC 2024 - 35.7K bytes - Viewed (0) -
src/cmd/go/script_test.go
// grace periods to clean up. We will send the first termination signal when // the context expires, then wait one grace period for the process to // produce whatever useful output it can (such as a stack trace). After the // first grace period expires, we'll escalate to os.Kill, leaving the second // grace period for the test function to record its output before the test // process itself terminates.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 03 18:15:22 UTC 2024 - 12.2K bytes - Viewed (0) -
src/runtime/preempt.go
// stack. // // Synchronous safe-points are implemented by overloading the stack // bound check in function prologues. To preempt a goroutine at the // next synchronous safe-point, the runtime poisons the goroutine's // stack bound to a value that will cause the next stack bound check // to fail and enter the stack growth implementation, which will // detect that it was actually a preemption and redirect to preemption // handling. //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 15:41:45 UTC 2024 - 15.1K bytes - Viewed (0) -
src/cmd/trace/goroutinegen.go
gs.augmentName(st.Stack) // Handle the goroutine state transition. from, to := st.Goroutine() if from == to { // Filter out no-op events. return } if from.Executing() && !to.Executing() { if to == trace.GoWaiting { // Goroutine started blocking. gs.block(ev.Time(), ev.Stack(), st.Reason, ctx) } else { gs.stop(ev.Time(), ev.Stack(), ctx) } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 4.6K bytes - Viewed (0) -
src/runtime/debug.go
// // This must be deeply nosplit because it is called from a function // prologue before the stack is set up and because the compiler will // call it from any splittable prologue (leading to infinite // recursion). // // Ideally it should also use very little stack because the linker // doesn't currently account for this in nosplit stack depth checking. // // Ensure mayMoreStackPreempt can be called for all ABIs. // //go:nosplit
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 11 20:38:24 UTC 2024 - 4.2K bytes - Viewed (0) -
src/internal/trace/trace_test.go
if !overflowed { t.Fail() } } for stack, traceSamples := range traceStacks { pprofSamples := pprofStacks[stack] delete(pprofStacks, stack) if traceSamples < pprofSamples { t.Logf("execution trace did not include all CPU profile samples for stack %q; %d in profile, %d in trace", stack, pprofSamples, traceSamples) if !overflowed { t.Fail() } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 18.5K bytes - Viewed (0) -
src/runtime/trace_cgo_test.go
if wantEvent := logs[category]; wantEvent == nil { logs[category] = &event } else if got, want := dumpStackV2(&event), dumpStackV2(wantEvent); got != want { t.Errorf("%q: got stack:\n%s\nwant stack:\n%s\n", category, got, want) } } } } func mustFindLogV2(t *testing.T, trc io.Reader, category string) trace.Event { r, err := trace.NewReader(trc) if err != nil {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 2.6K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/schedule.go
// in sset means v is a store, or already pushed to stack, or already assigned a store number continue } stack = append(stack, v) sset.add(v.ID) for len(stack) > 0 { w := stack[len(stack)-1] if storeNumber[w.ID] != 0 { stack = stack[:len(stack)-1] continue } if w.Op == OpPhi { // Phi value doesn't depend on store in the current block.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 08 15:53:17 UTC 2024 - 16.4K bytes - Viewed (0) -
src/cmd/compile/internal/types2/unify.go
// endless recursion occurs if the cycle is not detected. // // If x and y were compared before, they must be equal // (if they were not, the recursion would have stopped); // search the ifacePair stack for the same pair. // // This is a quadratic algorithm, but in practice these stacks // are extremely short (bounded by the nesting depth of interface
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 11 16:24:39 UTC 2024 - 27.8K bytes - Viewed (0) -
src/runtime/stubs.go
// // This must NOT be go:noescape: if fn is a stack-allocated closure, // fn puts g on a run queue, and g executes before fn returns, the // closure will be invalidated while it is still executing. func mcall(fn func(*g)) // systemstack runs fn on a system stack. // If systemstack is called from the per-OS-thread (g0) stack, or // if systemstack is called from the signal handling (gsignal) stack, // systemstack calls fn directly and returns.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 20.2K bytes - Viewed (0)