- Sort Score
- Results per page: 10
- Languages All
Results 41 - 50 of 107 for stack (0.03 sec)
-
src/reflect/export_test.go
} // Extract size information. argSize = abid.stackCallArgsSize retOffset = abid.retOffset frametype = toType(ft) // Expand stack pointer bitmap into byte-map. for i := uint32(0); i < abid.stackPtrs.n; i++ { stack = append(stack, abid.stackPtrs.data[i/8]>>(i%8)&1) } // Expand register pointer bitmaps into byte-maps. bool2byte := func(b bool) byte { if b { return 1 }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 02 15:10:48 UTC 2024 - 3.8K bytes - Viewed (0) -
src/internal/trace/event/event.go
Args []string // StringIDs indicates which of the arguments are string IDs. StringIDs []int // StackIDs indicates which of the arguments are stack IDs. // // The list is not sorted. The first index always refers to // the main stack for the current execution context of the event. StackIDs []int // StartEv indicates the event type of the corresponding "start"
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 3.4K bytes - Viewed (0) -
src/internal/trace/testdata/testprog/cpu-profile.go
pprofStacks[stack] += samples } } for stack, samples := range pprofStacks { fmt.Fprintf(os.Stderr, "%s\t%d\n", stack, samples) } } func cpuHogger(f func(x int) int, y *int, dur time.Duration) { // We only need to get one 100 Hz clock tick, so we've got // a large safety buffer. // But do at least 500 iterations (which should take about 100ms), // otherwise TestCPUProfileMultithreaded can fail if only one
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 3.8K bytes - Viewed (0) -
src/runtime/debug/stack_test.go
func (t *T) ptrmethod() []byte { return Stack() } func (t T) method() []byte { return t.ptrmethod() } /* The traceback should look something like this, modulo line numbers and hex constants. Don't worry much about the base levels, but check the ones in our own package. goroutine 10 [running]: runtime/debug.Stack(0x0, 0x0, 0x0) /Users/r/go/src/runtime/debug/stack.go:28 +0x80
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 16 15:19:04 UTC 2024 - 5.5K bytes - Viewed (0) -
src/go/types/mono.go
// vertices we visited earlier cannot be part of the cycle. for stack[0] != v { stack = stack[1:] } // TODO(mdempsky): Pivot stack so we report the cycle from the top? err := check.newError(InvalidInstanceCycle) obj0 := check.mono.vertices[v].obj err.addf(obj0, "instantiation cycle:") qf := RelativeTo(check.pkg) for _, v := range stack { edge := check.mono.edges[check.mono.vertices[v].pre]
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Apr 03 18:48:38 UTC 2024 - 9.2K bytes - Viewed (0) -
src/cmd/trace/viewer.go
package main import ( "fmt" "internal/trace" "internal/trace/traceviewer" "time" ) // viewerFrames returns the frames of the stack of ev. The given frame slice is // used to store the frames to reduce allocations. func viewerFrames(stk trace.Stack) []*trace.Frame { var frames []*trace.Frame stk.Frames(func(f trace.StackFrame) bool { frames = append(frames, &trace.Frame{ PC: f.PC, Fn: f.Func,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 1.4K bytes - Viewed (0) -
src/internal/abi/escape.go
// ensure that it's truly safe for p to not escape to the heap by // maintaining runtime pointer invariants (for example, that globals // and the heap may not generally point into a stack). // //go:nosplit //go:nocheckptr func NoEscape(p unsafe.Pointer) unsafe.Pointer { x := uintptr(p) return unsafe.Pointer(x ^ 0) } var alwaysFalse bool var escapeSink any
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 18 21:25:11 UTC 2024 - 884 bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/telemetry/counter/counter.go
type Counter = counter.Counter // A StackCounter is the in-memory knowledge about a stack counter. // StackCounters are more expensive to use than regular Counters, // requiring, at a minimum, a call to runtime.Callers. type StackCounter = counter.StackCounter // NewStack returns a new stack counter with the given name and depth. // // See "Counter Naming" in the package doc for a description of counter naming
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 15 18:02:34 UTC 2024 - 4.2K bytes - Viewed (0) -
src/runtime/asan.go
doasanwrite(addr, uintptr(len), sp, pc) } // Private interface for the runtime. const asanenabled = true // asan{read,write} are nosplit because they may be called between // fork and exec, when the stack must not grow. See issue #50391. //go:linkname asanread //go:nosplit func asanread(addr unsafe.Pointer, sz uintptr) { sp := getcallersp() pc := getcallerpc() doasanread(addr, sz, sp, pc) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Feb 13 20:39:58 UTC 2024 - 1.6K bytes - Viewed (0) -
src/internal/trace/parser.go
EvGoBlockRecv = 23 // goroutine blocks on chan recv [timestamp, stack] EvGoBlockSelect = 24 // goroutine blocks on select [timestamp, stack] EvGoBlockSync = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack] EvGoBlockCond = 26 // goroutine blocks on Cond [timestamp, stack] EvGoBlockNet = 27 // goroutine blocks on network [timestamp, stack] EvGoSysCall = 28 // syscall enter [timestamp, stack]
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:31:04 UTC 2024 - 4.7K bytes - Viewed (0)