Results 51 - 60 of 120 for Atack (0.07 sec)
src/runtime/preempt.go
// stack.
//
// Synchronous safe-points are implemented by overloading the stack
// bound check in function prologues. To preempt a goroutine at the
// next synchronous safe-point, the runtime poisons the goroutine's
// stack bound to a value that will cause the next stack bound check
// to fail and enter the stack growth implementation, which will
// detect that it was actually a preemption and redirect to preemption
// handling.
//
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 15:41:45 UTC 2024 - 15.1K bytes - Viewed (0)
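Aside: the preempt.go excerpt above describes preemption via a "poisoned" stack bound. The following is a minimal, self-contained sketch of that idea only; fakeG, prologueCheck, growOrPreempt and the poison value are illustrative names, not the runtime's actual identifiers or mechanism.

package main

import "fmt"

// Poison value: guaranteed to be larger than any real stack pointer,
// so every prologue-style check fails once it is installed.
const stackPreempt = ^uintptr(0)

// fakeG stands in for a goroutine descriptor; only the stack bound matters here.
type fakeG struct {
	stackGuard uintptr // normally the stack limit; set to stackPreempt to request preemption
}

// prologueCheck mimics the stack bound check a function prologue performs:
// if the stack pointer is below the guard, take the slow path.
func prologueCheck(g *fakeG, sp uintptr) {
	if sp < g.stackGuard {
		growOrPreempt(g, sp)
	}
}

// growOrPreempt is the slow path: it distinguishes a genuine overflow from a
// poisoned bound and redirects the latter to preemption handling.
func growOrPreempt(g *fakeG, sp uintptr) {
	if g.stackGuard == stackPreempt {
		fmt.Println("poisoned bound detected: redirect to preemption handling")
		return
	}
	fmt.Printf("genuine stack growth needed at sp=%#x\n", sp)
}

func main() {
	g := &fakeG{stackGuard: 0x1000}
	prologueCheck(g, 0x8000) // plenty of room: fast path, nothing happens

	g.stackGuard = stackPreempt // poison the bound to request a synchronous preemption
	prologueCheck(g, 0x8000)    // the same check now fails and preemption is detected
}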
src/internal/trace/trace_test.go
	if !overflowed {
		t.Fail()
	}
}
for stack, traceSamples := range traceStacks {
	pprofSamples := pprofStacks[stack]
	delete(pprofStacks, stack)
	if traceSamples < pprofSamples {
		t.Logf("execution trace did not include all CPU profile samples for stack %q; %d in profile, %d in trace", stack, pprofSamples, traceSamples)
		if !overflowed {
			t.Fail()
		}
	}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 18.5K bytes - Viewed (0)
src/cmd/compile/internal/ssa/schedule.go
	// in sset means v is a store, or already pushed to stack, or already assigned a store number
	continue
}
stack = append(stack, v)
sset.add(v.ID)
for len(stack) > 0 {
	w := stack[len(stack)-1]
	if storeNumber[w.ID] != 0 {
		stack = stack[:len(stack)-1]
		continue
	}
	if w.Op == OpPhi {
		// Phi value doesn't depend on store in the current block.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 08 15:53:17 UTC 2024 - 16.4K bytes - Viewed (0)
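Aside: the schedule.go excerpt above walks values with an explicit slice-based stack and only pops a value once it has been assigned a number. A hedged sketch of that same pattern, on a generic dependency DAG rather than the compiler's SSA values (node, number and the example graph are made up for illustration):

package main

import "fmt"

type node struct {
	id   int
	deps []*node // must form a DAG for this sketch to terminate
}

// number assigns increasing numbers so that every dependency gets a lower
// number than its dependent, using an explicit stack instead of recursion.
func number(roots []*node) map[int]int {
	num := map[int]int{}
	next := 1
	for _, r := range roots {
		stack := []*node{r}
		for len(stack) > 0 {
			w := stack[len(stack)-1]
			if num[w.id] != 0 {
				// already numbered: just pop it
				stack = stack[:len(stack)-1]
				continue
			}
			ready := true
			for _, d := range w.deps {
				if num[d.id] == 0 {
					stack = append(stack, d) // number dependencies first
					ready = false
				}
			}
			if ready {
				stack = stack[:len(stack)-1]
				num[w.id] = next
				next++
			}
		}
	}
	return num
}

func main() {
	a := &node{id: 1}
	b := &node{id: 2, deps: []*node{a}}
	c := &node{id: 3, deps: []*node{a, b}}
	fmt.Println(number([]*node{c})) // dependencies receive lower numbers than dependents
}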
src/cmd/compile/internal/types2/unify.go
// endless recursion occurs if the cycle is not detected.
//
// If x and y were compared before, they must be equal
// (if they were not, the recursion would have stopped);
// search the ifacePair stack for the same pair.
//
// This is a quadratic algorithm, but in practice these stacks
// are extremely short (bounded by the nesting depth of interface
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 11 16:24:39 UTC 2024 - 27.8K bytes - Viewed (0)
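Aside: a minimal sketch of the cycle-detection idea in the unify.go excerpt above, using made-up types rather than types2's ifacePair: before recursing on a pair (x, y), walk a small stack of pairs already being compared, and if the same pair reappears, assume equality instead of recursing forever. The linear search makes it quadratic, but the stack stays short in practice.

package main

import "fmt"

type typ struct {
	name string
	elem *typ // possibly a cyclic reference
}

// pair is a linked stack of comparisons currently in progress.
type pair struct {
	x, y *typ
	prev *pair
}

func identical(x, y *typ, p *pair) bool {
	if x == nil || y == nil {
		return x == y
	}
	// If this exact pair is already being compared higher up the recursion,
	// the structures are equal so far; stop to avoid endless recursion.
	for q := p; q != nil; q = q.prev {
		if (q.x == x && q.y == y) || (q.x == y && q.y == x) {
			return true
		}
	}
	if x.name != y.name {
		return false
	}
	return identical(x.elem, y.elem, &pair{x, y, p})
}

func main() {
	a := &typ{name: "T"}
	a.elem = a // self-referential type
	b := &typ{name: "T"}
	b.elem = b
	fmt.Println(identical(a, b, nil)) // true, and terminates despite the cycle
}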
src/runtime/stubs.go
//
// This must NOT be go:noescape: if fn is a stack-allocated closure,
// fn puts g on a run queue, and g executes before fn returns, the
// closure will be invalidated while it is still executing.
func mcall(fn func(*g))

// systemstack runs fn on a system stack.
// If systemstack is called from the per-OS-thread (g0) stack, or
// if systemstack is called from the signal handling (gsignal) stack,
// systemstack calls fn directly and returns.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 20.2K bytes - Viewed (0)
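Aside: a toy illustration of the dispatch rule the stubs.go excerpt above documents for systemstack: if the caller is already on the system stack, fn is called directly; otherwise there is a switch, fn runs, and control switches back. This models only the rule, not the runtime's actual stack switching; stackKind, current and onSystemStack are invented for the sketch.

package main

import "fmt"

type stackKind int

const (
	userStack stackKind = iota
	systemStack
)

var current = userStack

// onSystemStack guarantees fn runs "on the system stack": a no-op dispatch
// when already there, otherwise switch, run fn, and switch back.
func onSystemStack(fn func()) {
	if current == systemStack {
		fn()
		return
	}
	prev := current
	current = systemStack // pretend to switch stacks
	fn()
	current = prev // switch back before returning to the caller
}

func main() {
	onSystemStack(func() { fmt.Println("on system stack:", current == systemStack) })
	fmt.Println("back on user stack:", current == userStack)
}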
src/runtime/mbarrier.go
// the stack to the heap, but this requires first having a pointer
// hidden on the stack. Immediately after a stack is scanned, it only
// points to shaded objects, so it's not hiding anything, and the
// shade(*slot) prevents it from hiding any other pointers on its
// stack.
//
// For a detailed description of this barrier and proof of
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 15.7K bytes - Viewed (0)
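Aside: the mbarrier.go excerpt above reasons about shading the value being overwritten so a pointer cannot be "hidden" from the collector. Below is a toy tri-color sketch of that deletion-style shading only, not the runtime's hybrid barrier; obj, shade and writePointer are invented names for the illustration.

package main

import "fmt"

type color int

const (
	white color = iota // not yet seen by the collector
	grey               // seen, children not yet scanned
	black              // fully scanned
)

type obj struct {
	name string
	col  color
	ref  *obj
}

var worklist []*obj

// shade marks a white object grey and queues it for scanning.
func shade(o *obj) {
	if o != nil && o.col == white {
		o.col = grey
		worklist = append(worklist, o)
	}
}

// writePointer is a toy deletion barrier: shade the old value of *slot before
// installing the new pointer, so the old target stays visible to the collector
// even if this slot was its last unscanned reference.
func writePointer(slot **obj, val *obj) {
	shade(*slot)
	*slot = val
}

func main() {
	a := &obj{name: "a"}
	b := &obj{name: "b"}
	holder := &obj{name: "holder", ref: a}

	writePointer(&holder.ref, b) // a is shaded before being unlinked
	fmt.Println("a shaded:", a.col == grey, "holder now points to:", holder.ref.name)
}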
src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
{11, "SIGSEGV", "segmentation fault"}, {12, "SIGUSR2", "user defined signal 2"}, {13, "SIGPIPE", "broken pipe"}, {14, "SIGALRM", "alarm clock"}, {15, "SIGTERM", "terminated"}, {16, "SIGSTKFLT", "stack fault"}, {17, "SIGCHLD", "child exited"}, {18, "SIGCONT", "continued"}, {19, "SIGSTOP", "stopped (signal)"}, {20, "SIGTSTP", "stopped"}, {21, "SIGTTIN", "stopped (tty input)"},
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 04 16:19:04 UTC 2024 - 34.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssagen/abi.go
fn.SetABIWrapper(true)
fn.SetDupok(true)

// ABI0-to-ABIInternal wrappers will be mainly loading params from
// stack into registers (and/or storing stack locations back to
// registers after the wrapped call); in most cases they won't
// need to allocate stack space, so it should be OK to mark them
// as NOSPLIT in these cases. In addition, my assumption is that
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 19:57:43 UTC 2024 - 13.8K bytes - Viewed (0)
src/runtime/crash_cgo_test.go
t.Skip("skipping windows specific test") } testenv.SkipFlaky(t, 22575) o := runTestProg(t, "testprogcgo", "StackMemory") stackUsage, err := strconv.Atoi(o) if err != nil { t.Fatalf("Failed to read stack usage: %v", err) } if expected, got := 100<<10, stackUsage; got > expected { t.Fatalf("expected < %d bytes of memory per thread, got %d", expected, got) } } func TestSigStackSwapping(t *testing.T) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 15 16:44:47 UTC 2024 - 22.2K bytes - Viewed (0) -
src/internal/bisect/bisect.go
	return dst
}

// MatchStack assigns the current call stack a change ID.
// If the stack should be printed, MatchStack prints it.
// Then MatchStack reports whether a change at the current call stack should be enabled.
func (m *Matcher) Stack(w Writer) bool {
	if m == nil {
		return true
	}
	return m.stack(w)
}

// stack does the real work for Stack.
// This lets stack's body handle m == nil and potentially be inlined.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 03 17:28:43 UTC 2024 - 22.9K bytes - Viewed (0)
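Aside: the bisect.go excerpt above uses a common Go pattern: the exported method only handles the nil receiver and delegates to an unexported method, which keeps the exported method small enough to inline. A standalone sketch of just that pattern; the Matcher and Writer below are stand-ins, not the internal/bisect types.

package main

import "fmt"

type Writer interface {
	WriteString(s string) (int, error)
}

type Matcher struct {
	enabled bool
}

// Stack reports whether a change at the current call stack should be enabled.
// A nil *Matcher means "no pattern in effect", so everything is enabled.
func (m *Matcher) Stack(w Writer) bool {
	if m == nil {
		return true
	}
	return m.stack(w)
}

// stack does the real work for Stack, so Stack's tiny body can be inlined.
func (m *Matcher) stack(w Writer) bool {
	if w != nil {
		w.WriteString("match considered\n")
	}
	return m.enabled
}

func main() {
	var m *Matcher
	fmt.Println(m.Stack(nil)) // true: calling through a nil receiver is fine

	m = &Matcher{enabled: false}
	fmt.Println(m.Stack(nil)) // false: the real logic runs
}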