- Sort: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 24 for tracebackPCs (0.21 sec)
-
src/runtime/traceback_test.go
func (w ttiWrapper) m1() *ttiResult { return ttiLeaf() } //go:noinline func ttiExcluded1() *ttiResult { return ttiExcluded2() } // ttiExcluded2 should be excluded from tracebacks. There are // various ways this could come up. Linking it to a "runtime." name is // rather synthetic, but it's easy and reliable. See issue #42754 for // one way this happened in real code. //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Dec 14 17:22:18 UTC 2023 - 22.9K bytes - Viewed (0) -
src/runtime/extern.go
schedtrace: setting schedtrace=X causes the scheduler to emit a single line to standard error every X milliseconds, summarizing the scheduler state. tracebackancestors: setting tracebackancestors=N extends tracebacks with the stacks at which goroutines were created, where N limits the number of ancestor goroutines to report. This also extends the information returned by runtime.Stack. Setting N to 0 will report no ancestry information.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:52:17 UTC 2024 - 18.9K bytes - Viewed (0) -
src/runtime/pprof/proto.go
if l, ok := b.locs[addr]; ok { // When generating code for an inlined function, the compiler adds // NOP instructions to the outermost function as a placeholder for // each layer of inlining. When the runtime generates tracebacks for // stacks that include inlined functions, it uses the addresses of // those NOPs as "fake" PCs on the stack as if they were regular // function call sites. But if a profiling signal arrives while the
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Dec 13 20:40:52 UTC 2023 - 25.7K bytes - Viewed (0) -
src/runtime/cgocall.go
// memory. It checks whether that Go memory contains any other // pointer into unpinned Go memory. If it does, we panic. // The return values are unused but useful to see in panic tracebacks. func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) { if inheap(uintptr(p)) { b, span, _ := findObject(uintptr(p), 0, 0) base = b if base == 0 { return }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:16:47 UTC 2024 - 24.2K bytes - Viewed (0) -
src/runtime/asm_mips64x.s
MOVV m_curg(R1), g JAL runtime·save_g(SB) MOVV (g_sched+gobuf_sp)(g), R29 MOVV R0, (g_sched+gobuf_sp)(g) RET noswitch: // already on m stack, just call directly // Using a tail call here cleans up tracebacks since we won't stop // at an intermediate systemstack. MOVV 0(REGCTXT), R4 // code pointer MOVV 0(R29), R31 // restore LR ADDV $8, R29 JMP (R4) // func switchToCrashStack0(fn func())
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Nov 06 19:45:59 UTC 2023 - 24.3K bytes - Viewed (0) -
src/runtime/asm_riscv64.s
MOV m_curg(T0), g CALL runtime·save_g(SB) MOV (g_sched+gobuf_sp)(g), X2 MOV ZERO, (g_sched+gobuf_sp)(g) RET noswitch: // already on m stack, just call directly // Using a tail call here cleans up tracebacks since we won't stop // at an intermediate systemstack. MOV 0(CTXT), T1 // code pointer ADD $8, X2 JMP (T1) // func switchToCrashStack0(fn func())
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Nov 09 13:57:06 UTC 2023 - 27K bytes - Viewed (0) -
src/runtime/asm_s390x.s
MOVD m_curg(R3), g BL runtime·save_g(SB) MOVD (g_sched+gobuf_sp)(g), R15 MOVD $0, (g_sched+gobuf_sp)(g) RET noswitch: // already on m stack, just call directly // Using a tail call here cleans up tracebacks since we won't stop // at an intermediate systemstack. MOVD 0(R12), R3 // code pointer MOVD 0(R15), LR // restore LR ADD $8, R15 BR (R3) // func switchToCrashStack0(fn func())
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Jan 25 09:18:28 UTC 2024 - 28.1K bytes - Viewed (0) -
src/runtime/asm_loong64.s
MOVV m_curg(R4), g JAL runtime·save_g(SB) MOVV (g_sched+gobuf_sp)(g), R3 MOVV R0, (g_sched+gobuf_sp)(g) RET noswitch: // already on m stack, just call directly // Using a tail call here cleans up tracebacks since we won't stop // at an intermediate systemstack. MOVV 0(REGCTXT), R4 // code pointer MOVV 0(R3), R1 // restore LR ADDV $8, R3 JMP (R4) // func switchToCrashStack0(fn func())
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 13 15:04:25 UTC 2024 - 26.5K bytes - Viewed (0) -
src/runtime/asm_mipsx.s
MOVW m_curg(R1), g JAL runtime·save_g(SB) MOVW (g_sched+gobuf_sp)(g), R29 MOVW R0, (g_sched+gobuf_sp)(g) RET noswitch: // already on m stack, just call directly // Using a tail call here cleans up tracebacks since we won't stop // at an intermediate systemstack. MOVW 0(REGCTXT), R4 // code pointer MOVW 0(R29), R31 // restore LR ADD $4, R29 JMP (R4) // func switchToCrashStack0(fn func())
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 06 11:46:29 UTC 2024 - 26.3K bytes - Viewed (0) -
src/runtime/asm_arm.s
MOVW g_m(g), R1 MOVW m_curg(R1), R0 BL setg<>(SB) MOVW (g_sched+gobuf_sp)(g), R13 MOVW $0, R3 MOVW R3, (g_sched+gobuf_sp)(g) RET noswitch: // Using a tail call here cleans up tracebacks since we won't stop // at an intermediate systemstack. MOVW R0, R7 MOVW 0(R0), R0 MOVW.P 4(R13), R14 // restore LR B (R0) // func switchToCrashStack0(fn func())
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Feb 23 21:00:52 UTC 2024 - 32.1K bytes - Viewed (0)