- Sort Score
- Result 10 results
- Languages All
Results 31 - 40 of 65 for Stack (0.11 sec)
-
src/internal/trace/testdata/testprog/cpu-profile.go
pprofStacks[stack] += samples } } for stack, samples := range pprofStacks { fmt.Fprintf(os.Stderr, "%s\t%d\n", stack, samples) } } func cpuHogger(f func(x int) int, y *int, dur time.Duration) { // We only need to get one 100 Hz clock tick, so we've got // a large safety buffer. // But do at least 500 iterations (which should take about 100ms), // otherwise TestCPUProfileMultithreaded can fail if only one
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 3.8K bytes - Viewed (0) -
src/runtime/debug/stack_test.go
func (t *T) ptrmethod() []byte { return Stack() } func (t T) method() []byte { return t.ptrmethod() } /* The traceback should look something like this, modulo line numbers and hex constants. Don't worry much about the base levels, but check the ones in our own package. goroutine 10 [running]: runtime/debug.Stack(0x0, 0x0, 0x0) /Users/r/go/src/runtime/debug/stack.go:28 +0x80
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 16 15:19:04 UTC 2024 - 5.5K bytes - Viewed (0) -
src/cmd/trace/viewer.go
package main import ( "fmt" "internal/trace" "internal/trace/traceviewer" "time" ) // viewerFrames returns the frames of the stack of ev. The given frame slice is // used to store the frames to reduce allocations. func viewerFrames(stk trace.Stack) []*trace.Frame { var frames []*trace.Frame stk.Frames(func(f trace.StackFrame) bool { frames = append(frames, &trace.Frame{ PC: f.PC, Fn: f.Func,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 1.4K bytes - Viewed (0) -
src/internal/abi/escape.go
// ensure that it's truly safe for p to not escape to the heap by // maintaining runtime pointer invariants (for example, that globals // and the heap may not generally point into a stack). // //go:nosplit //go:nocheckptr func NoEscape(p unsafe.Pointer) unsafe.Pointer { x := uintptr(p) return unsafe.Pointer(x ^ 0) } var alwaysFalse bool var escapeSink any
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 18 21:25:11 UTC 2024 - 884 bytes - Viewed (0) -
src/internal/trace/parser.go
EvGoBlockRecv = 23 // goroutine blocks on chan recv [timestamp, stack] EvGoBlockSelect = 24 // goroutine blocks on select [timestamp, stack] EvGoBlockSync = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack] EvGoBlockCond = 26 // goroutine blocks on Cond [timestamp, stack] EvGoBlockNet = 27 // goroutine blocks on network [timestamp, stack] EvGoSysCall = 28 // syscall enter [timestamp, stack]
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:31:04 UTC 2024 - 4.7K bytes - Viewed (0) -
src/cmd/covdata/covdata.go
"os" "runtime" "runtime/pprof" "strings" ) var verbflag = flag.Int("v", 0, "Verbose trace output level") var hflag = flag.Bool("h", false, "Panic on fatal errors (for stack trace)") var hwflag = flag.Bool("hw", false, "Panic on warnings (for stack trace)") var indirsflag = flag.String("i", "", "Input dirs to examine (comma separated)") var pkgpatflag = flag.String("pkg", "", "Restrict output to package(s) matching specified package pattern.")
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 14 19:41:17 UTC 2024 - 5.7K bytes - Viewed (0) -
src/io/fs/glob_test.go
if err != path.ErrBadPattern { t.Errorf("Glob(fs, %#q) returned err=%v, want path.ErrBadPattern", pattern, err) } } } func TestCVE202230630(t *testing.T) { // Prior to CVE-2022-30630, a stack exhaustion would occur given a large // number of separators. There is now a limit of 10,000. _, err := Glob(os.DirFS("."), "/*"+strings.Repeat("/", 10001)) if err != path.ErrBadPattern {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 07 18:36:52 UTC 2024 - 2.3K bytes - Viewed (0) -
src/cmd/trace/procgen.go
gs.augmentName(st.Stack) // Handle the goroutine state transition. from, to := st.Goroutine() if from == to { // Filter out no-op events. return } if from == trace.GoRunning && !to.Executing() { if to == trace.GoWaiting { // Goroutine started blocking. gs.block(ev.Time(), ev.Stack(), st.Reason, ctx) } else { gs.stop(ev.Time(), ev.Stack(), ctx) } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 6.1K bytes - Viewed (0) -
src/runtime/defs_dragonfly_amd64.go
_EVFILT_USER = -0x9 _NOTE_TRIGGER = 0x1000000 ) type rtprio struct { _type uint16 prio uint16 } type lwpparams struct { start_func uintptr arg unsafe.Pointer stack uintptr tid1 unsafe.Pointer // *int32 tid2 unsafe.Pointer // *int32 } type sigset struct { __bits [4]uint32 } type stackt struct { ss_sp uintptr ss_size uintptr
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 12 21:17:22 UTC 2024 - 3.5K bytes - Viewed (0) -
src/runtime/cpuprof.go
} prof.signalLock.Store(0) } // addNonGo adds the non-Go stack trace to the profile. // It is called from a non-Go thread, so we cannot use much stack at all, // nor do anything that needs a g or an m. // In particular, we can't call cpuprof.log.write. // Instead, we copy the stack into cpuprof.extra, // which will be drained the next time a Go thread // gets the signal handling event. //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 8.5K bytes - Viewed (0)