- Sort by: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 29 for tracebackPCs (0.3 sec)
-
src/runtime/runtime1.go
var traceback_cache uint32 = 2 << tracebackShift var traceback_env uint32 // gotraceback returns the current traceback settings. // // If level is 0, suppress all tracebacks. // If level is 1, show tracebacks, but exclude runtime frames. // If level is 2, show tracebacks including runtime frames. // If all is set, print all goroutine stacks. Otherwise, print just the current goroutine. // If crash is set, crash (core dump, etc) after tracebacking.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:52:17 UTC 2024 - 19.3K bytes - Viewed (0) -
src/runtime/proc.go
u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack) } else { u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack) } n += tracebackPCs(&u, 0, stk[n:]) if n <= 0 { // Normal traceback is impossible or has failed. // Account it against abstract "System" or "GC". n = 2 if inVDSOPage(pc) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 207.5K bytes - Viewed (0) -
src/cmd/internal/obj/pcln.go
func (s *pcinlineState) addBranch(ctxt *Link, globalIndex int) int { if globalIndex < 0 { return -1 } localIndex, ok := s.globalToLocal[globalIndex] if ok { return localIndex } // Since tracebacks don't include column information, we could // use one node for multiple calls of the same function on the // same line (e.g., f(x) + f(y)). For now, we use one node for // each inlined call.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Oct 31 20:45:15 UTC 2022 - 11.8K bytes - Viewed (0) -
src/runtime/runtime_test.go
// not be skipped if only -quick is used. var flagQuick = flag.Bool("quick", false, "skip slow tests, for cmd/dist test runtime:cpu124") func init() { // We're testing the runtime, so make tracebacks show things // in the runtime. This only raises the level, so it won't // override GOTRACEBACK=crash from the user. SetTracebackEnv("system") } var errf error func errfn() error { return errf }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 11.7K bytes - Viewed (0) -
src/cmd/compile/internal/ir/func.go
} // A ScopeID represents a lexical scope within a function. type ScopeID int32 const ( funcDupok = 1 << iota // duplicate definitions ok funcWrapper // hide frame from users (elide in tracebacks, don't count as a frame for recover()) funcABIWrapper // is an ABI wrapper (also set flagWrapper) funcNeedctxt // function uses context register (has closure variables)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:05:44 UTC 2024 - 21.1K bytes - Viewed (0) -
src/runtime/traceback_test.go
func (w ttiWrapper) m1() *ttiResult { return ttiLeaf() } //go:noinline func ttiExcluded1() *ttiResult { return ttiExcluded2() } // ttiExcluded2 should be excluded from tracebacks. There are // various ways this could come up. Linking it to a "runtime." name is // rather synthetic, but it's easy and reliable. See issue #42754 for // one way this happened in real code. //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Dec 14 17:22:18 UTC 2023 - 22.9K bytes - Viewed (0) -
src/runtime/extern.go
schedtrace: setting schedtrace=X causes the scheduler to emit a single line to standard error every X milliseconds, summarizing the scheduler state. tracebackancestors: setting tracebackancestors=N extends tracebacks with the stacks at which goroutines were created, where N limits the number of ancestor goroutines to report. This also extends the information returned by runtime.Stack. Setting N to 0 will report no ancestry information.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:52:17 UTC 2024 - 18.9K bytes - Viewed (0) -
src/runtime/pprof/proto.go
if l, ok := b.locs[addr]; ok { // When generating code for an inlined function, the compiler adds // NOP instructions to the outermost function as a placeholder for // each layer of inlining. When the runtime generates tracebacks for // stacks that include inlined functions, it uses the addresses of // those NOPs as "fake" PCs on the stack as if they were regular // function call sites. But if a profiling signal arrives while the
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Dec 13 20:40:52 UTC 2023 - 25.7K bytes - Viewed (0) -
src/runtime/cgocall.go
// memory. It checks whether that Go memory contains any other // pointer into unpinned Go memory. If it does, we panic. // The return values are unused but useful to see in panic tracebacks. func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) { if inheap(uintptr(p)) { b, span, _ := findObject(uintptr(p), 0, 0) base = b if base == 0 { return }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:16:47 UTC 2024 - 24.2K bytes - Viewed (0) -
src/runtime/asm_mips64x.s
MOVV m_curg(R1), g JAL runtime·save_g(SB) MOVV (g_sched+gobuf_sp)(g), R29 MOVV R0, (g_sched+gobuf_sp)(g) RET noswitch: // already on m stack, just call directly // Using a tail call here cleans up tracebacks since we won't stop // at an intermediate systemstack. MOVV 0(REGCTXT), R4 // code pointer MOVV 0(R29), R31 // restore LR ADDV $8, R29 JMP (R4) // func switchToCrashStack0(fn func())
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Nov 06 19:45:59 UTC 2023 - 24.3K bytes - Viewed (0)