- Sort Score
- Results per page: 10
- Languages All
Results 31 - 40 of 1,946 for Stack0 (0.11 sec)
-
src/runtime/testdata/testprogcgo/stackswitch.go
// We want to trigger a bounds check on the g0 stack. To do this, we // need to call a splittable function through systemstack(). // SetGCPercent contains such a systemstack call. gogc := debug.SetGCPercent(100) debug.SetGCPercent(gogc) } // Regression test for https://go.dev/issue/62440. It should be possible for C // threads to call into Go from different stacks without crashing due to g0 // stack bounds checks. //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Nov 17 15:33:38 UTC 2023 - 1.1K bytes - Viewed (0) -
src/runtime/os_netbsd.go
// signal stack of the creating thread. We always create a // new signal stack here, to avoid having two Go threads using // the same signal stack. This breaks the case of a thread // created in C that calls sigaltstack and then calls a Go // function, because we will lose track of the C code's // sigaltstack, but it's the best we can do. signalstack(&gp.m.gsignal.stack) gp.m.newSigstack = true
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 10.1K bytes - Viewed (0) -
src/runtime/cpuprof.go
on bool // profiling is on log *profBuf // profile events written here // extra holds extra stacks accumulated in addNonGo // corresponding to profiling signals arriving on // non-Go-created threads. Those stacks are written // to log the next time a normal Go thread gets the // signal handler. // Assuming the stacks are 2 words each (we don't get // a full traceback from those threads), plus one word
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 8.5K bytes - Viewed (0) -
src/runtime/HACKING.md
be preempted, that must not grow the user stack, or that switch user goroutines. Code running on the system stack is implicitly non-preemptible and the garbage collector does not scan system stacks. While running on the system stack, the current user stack is not used for execution. nosplit functions ----------------- Most functions start with a prologue that inspects the stack pointer and the current G's stack bound and calls `morestack` if the stack
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 13.9K bytes - Viewed (0) -
src/runtime/testdata/testprogcgo/sigstack.go
// license that can be found in the LICENSE file. //go:build !plan9 && !windows // +build !plan9,!windows // Test handling of Go-allocated signal stacks when calling from // C-created threads with and without signal stacks. (See issue // #22930.) package main /* #include <pthread.h> #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <sys/mman.h> #ifdef _AIX
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Dec 13 18:45:54 UTC 2021 - 2.1K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/telemetry/internal/upload/reports.go
Counters: make(map[string]int64), Stacks: make(map[string]int64), } upload.Programs = append(upload.Programs, x) for k, v := range p.Counters { if cfg.HasCounter(p.Program, k) && report.X <= cfg.Rate(p.Program, k) { x.Counters[k] = v } } // and the same for Stacks // this can be made more efficient, when it matters for k, v := range p.Stacks { before, _, _ := strings.Cut(k, "\n")
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 04 14:52:56 UTC 2024 - 10.3K bytes - Viewed (0) -
src/runtime/signal_unix.go
// alternate signal stack. If the alternate signal stack is not set // for the thread (the normal case) then set the alternate signal // stack to the gsignal stack. If the alternate signal stack is set // for the thread (the case when a non-Go thread sets the alternate // signal stack and then calls a Go function) then set the gsignal // stack to the alternate signal stack. We also set the alternate
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 10 16:04:54 UTC 2024 - 45K bytes - Viewed (0) -
src/runtime/os_freebsd.go
func newosproc(mp *m) { stk := unsafe.Pointer(mp.g0.stack.hi) if false { print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " thr_start=", abi.FuncPCABI0(thr_start), " id=", mp.id, " ostk=", &mp, "\n") } param := thrparam{ start_func: abi.FuncPCABI0(thr_start), arg: unsafe.Pointer(mp), stack_base: mp.g0.stack.lo, stack_size: uintptr(stk) - mp.g0.stack.lo, child_tid: nil, // minit will record tid
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Dec 05 20:34:30 UTC 2023 - 11.6K bytes - Viewed (0) -
src/runtime/traceallocfree.go
x := s.base() + i*s.elemsize trace.HeapObjectExists(x, s.typePointersOfUnchecked(x).typ) } abits.advance() } } // Write out all the goroutine stacks. forEachGRace(func(gp *g) { trace.GoroutineStackExists(gp.stack.lo, gp.stack.hi-gp.stack.lo) }) traceRelease(trace) } func traceSpanTypeAndClass(s *mspan) traceArg { if s.state.get() == mSpanInUse { return traceArg(s.spanclass) << 1 }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 20:32:51 UTC 2024 - 5.9K bytes - Viewed (0) -
src/runtime/extern.go
which goroutines were created, where N limits the number of ancestor goroutines to report. This also extends the information returned by runtime.Stack. Setting N to 0 will report no ancestry information. tracefpunwindoff: setting tracefpunwindoff=1 forces the execution tracer to use the runtime's default stack unwinder instead of frame pointer unwinding.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:52:17 UTC 2024 - 18.9K bytes - Viewed (0)