- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 461 for stack (0.07 sec)
-
src/runtime/mem_darwin.go
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime import ( "unsafe" ) // Don't split the stack as this function may be invoked without a valid G, // which prevents us from allocating more stack. // //go:nosplit func sysAllocOS(n uintptr) unsafe.Pointer { v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0) if err != 0 { return nil
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Aug 22 19:05:10 UTC 2023 - 2K bytes - Viewed (0) -
src/internal/trace/internal/testgen/go122/trace.go
g.strings[s] = id return id } // Stack registers a stack with the trace. // // This is a convenience function for easily adding correct // stacks to traces. func (g *Generation) Stack(stk []trace.StackFrame) uint64 { if len(stk) == 0 { return 0 } if len(stk) > 32 { panic("stack too big for test") } var stkc stack copy(stkc.stk[:], stk) stkc.len = len(stk)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 9.7K bytes - Viewed (0) -
src/runtime/syscall_windows.go
// // For cdecl and stdcall, all arguments are on the stack. // // For fastcall, the trampoline spills register arguments to // the reserved spill slots below the stack arguments, // resulting in a layout equivalent to stdcall. // // For arm, the trampoline stores the register arguments just // below the stack arguments, so again we can treat it as one // big stack arguments frame. args unsafe.Pointer
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 20:12:46 UTC 2024 - 16.6K bytes - Viewed (0) -
test/linknameasm.dir/x.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 15:22:22 UTC 2024 - 451 bytes - Viewed (0) -
src/runtime/callers_test.go
// running the remaining deferred functions. // // This test does not verify the accuracy of the call stack (it // currently includes a frame from runtime.deferreturn which would // normally be omitted). It is only intended to check that producing the // call stack won't crash. defer func() { pcs := make([]uintptr, 32) for i := range pcs { // If runtime.recovery doesn't properly restore the
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 28 21:36:31 UTC 2023 - 12.1K bytes - Viewed (0) -
src/runtime/signal_riscv64.go
// preparePanic sets up the stack to look like a call to sigpanic. func (c *sigctxt) preparePanic(sig uint32, gp *g) { // We arrange RA, and pc to pretend the panicking // function calls sigpanic directly. // Always save RA to stack so that panics in leaf // functions are correctly handled. This smashes // the stack frame but we're not going back there // anyway. sp := c.sp() - goarch.PtrSize
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Oct 04 02:55:17 UTC 2023 - 2.9K bytes - Viewed (0) -
src/runtime/traceruntime.go
// // nosplit because it's called on the syscall path when stack movement is forbidden. // //go:nosplit func (tl traceLocker) ok() bool { return tl.gen != 0 } // traceRelease indicates that this M is done writing trace events. // // nosplit because it's called on the syscall path when stack movement is forbidden. // //go:nosplit func traceRelease(tl traceLocker) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 25.7K bytes - Viewed (0) -
test/fixedbugs/issue16016.go
_ = v } } type R struct{ *T } type Q interface { Foo([]interface{}) } func main() { var count = 10000 if runtime.Compiler == "gccgo" { // On targets without split-stack libgo allocates // a large stack for each goroutine. On 32-bit // systems this test can run out of memory. const intSize = 32 << (^uint(0) >> 63) // 32 or 64 if intSize < 64 { count = 100 } } var q Q = &R{&T{}}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Mar 27 18:39:06 UTC 2024 - 877 bytes - Viewed (0) -
src/runtime/mem_windows.go
_PAGE_READWRITE = 0x0004 _PAGE_NOACCESS = 0x0001 _ERROR_NOT_ENOUGH_MEMORY = 8 _ERROR_COMMITMENT_LIMIT = 1455 ) // Don't split the stack as this function may be invoked without a valid G, // which prevents us from allocating more stack. // //go:nosplit func sysAllocOS(n uintptr) unsafe.Pointer { return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE)) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Aug 22 19:05:10 UTC 2023 - 3.9K bytes - Viewed (0) -
src/cmd/compile/internal/test/mergelocals_test.go
testenv.MustHaveGoBuild(t) // This test does a build of a specific canned package to // check whether merging of stack slots is taking place. // The idea is to do the compile with a trace option turned // on and then pick up on the frame offsets of specific // variables. // // Stack slot merging is a greedy algorithm, and there can // be many possible ways to overlap a given set of candidate
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 18 15:43:53 UTC 2024 - 4.7K bytes - Viewed (0)