- Sort Score
- Results per page: 10
- Languages All
Results 141 - 150 of 357 for stack (0.03 sec)
-
test/fixedbugs/issue20780.go
// license that can be found in the LICENSE file. // We have a limit of 1GB for stack frames. // Make sure we include the callee args section. package main type Big = [400e6]byte func f() { // GC_ERROR "stack frame too large" // Note: This test relies on the fact that we currently always // spill function-results to the stack, even if they're so // large that we would normally heap allocate them. If we ever
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sun Jan 10 08:01:49 UTC 2021 - 755 bytes - Viewed (0) -
test/live2.go
return &ret } func bad40() { t := newT40() // ERROR "stack object ret T40$" "stack object .autotmp_[0-9]+ runtime.hmap$" printnl() // ERROR "live at call to printnl: ret$" useT40(t) } func good40() { ret := T40{} // ERROR "stack object ret T40$" ret.m = make(map[int]int, 42) // ERROR "stack object .autotmp_[0-9]+ runtime.hmap$" t := &ret
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 21 23:29:33 UTC 2023 - 953 bytes - Viewed (0) -
src/runtime/tracebackx_test.go
// license that can be found in the LICENSE file. package runtime func XTestSPWrite(t TestingT) { // Test that we can traceback from the stack check prologue of a function // that writes to SP. See #62326. // Start a goroutine to minimize the initial stack and ensure we grow the stack. done := make(chan bool) go func() { testSPWrite() // Defined in assembly done <- true }() <-done
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Sep 06 14:45:46 UTC 2023 - 509 bytes - Viewed (0) -
src/runtime/trace_cgo_test.go
if wantEvent := logs[category]; wantEvent == nil { logs[category] = &event } else if got, want := dumpStackV2(&event), dumpStackV2(wantEvent); got != want { t.Errorf("%q: got stack:\n%s\nwant stack:\n%s\n", category, got, want) } } } } func mustFindLogV2(t *testing.T, trc io.Reader, category string) trace.Event { r, err := trace.NewReader(trc) if err != nil {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 2.6K bytes - Viewed (0) -
test/codegen/zerosize.go
// Make sure a pointer variable and a zero-sized variable // aren't allocated to the same stack slot. // See issue 24993. package codegen func zeroSize() { c := make(chan struct{}) // amd64:`MOVQ\t\$0, command-line-arguments\.s\+56\(SP\)` var s *int // force s to be a stack object, also use some (fixed) stack space g(&s, 1, 2, 3, 4, 5) // amd64:`LEAQ\tcommand-line-arguments\..*\+55\(SP\)` c <- struct{}{}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 16 18:19:47 UTC 2022 - 650 bytes - Viewed (0) -
test/fixedbugs/issue32288.go
var res []T for { var e *T res = append(res, *e) } } func main() { defer func() { useStack(100) // force a stack copy // We're expecting a panic. // The bug in this issue causes a throw, which this recover() will not squash. recover() }() junk() // fill the stack with invalid pointers f(nil, nil) } func useStack(n int) { if n == 0 { return } useStack(n - 1) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 31 21:52:17 UTC 2019 - 809 bytes - Viewed (0) -
test/fixedbugs/issue26407.go
// license that can be found in the LICENSE file. // Issue 26407: ensure that stack variables which have // had their address taken and then used in a comparison, // but are otherwise unused, are cleared. package main func main() { poison() test() } //go:noinline func poison() { // initialise the stack with invalid pointers var large [256]uintptr for i := range large { large[i] = 1 }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jul 17 14:58:54 UTC 2018 - 964 bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go
// Find the set of cancel vars to analyze. stack := make([]ast.Node, 0, 32) ast.Inspect(node, func(n ast.Node) bool { switch n.(type) { case *ast.FuncLit: if len(stack) > 0 { return false // don't stray into nested functions } case nil: stack = stack[:len(stack)-1] // pop return true } stack = append(stack, n) // push
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jan 22 19:00:13 UTC 2024 - 9K bytes - Viewed (0) -
src/runtime/mem_bsd.go
//go:build dragonfly || freebsd || netbsd || openbsd || solaris package runtime import ( "unsafe" ) // Don't split the stack as this function may be invoked without a valid G, // which prevents us from allocating more stack. // //go:nosplit func sysAllocOS(n uintptr) unsafe.Pointer { v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0) if err != 0 { return nil
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Aug 22 19:05:10 UTC 2023 - 2.2K bytes - Viewed (0) -
test/fixedbugs/issue16095.go
y[i] = 99 } // Make sure y is heap allocated. sink = y panic(nil) // After the recover we reach the deferreturn, which // copies the heap version of x back to the stack. // It gets the pointer to x from a stack slot that was // not marked as live during the call to runtime.GC(). } var sinkint int func g(p *int) (x [20]byte) { // Initialize x. for i := range x { x[i] = byte(i) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jun 27 16:48:48 UTC 2016 - 1.9K bytes - Viewed (0)