- Sort Score
- Results per page: 10
- Languages All
Results 71 - 80 of 348 for Gomaxprocs (0.36 sec)
-
src/cmd/dist/main.go
maxbg = min(maxbg, runtime.NumCPU()) } // For deterministic make.bash debugging and for smallest-possible footprint, // pay attention to GOMAXPROCS=1. This was a bad idea for 1.4 bootstrap, but // the bootstrap version is now 1.17+ and thus this is fine. if runtime.GOMAXPROCS(0) == 1 { maxbg = 1 } bginit() if len(os.Args) > 1 && os.Args[1] == "-check-goarm" { useVFPv1() // might fail with SIGILL
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Jun 22 19:44:52 UTC 2023 - 5.5K bytes - Viewed (0) -
src/expvar/expvar_test.go
bytesSent Int bytesRead Int ) // The benchmark creates GOMAXPROCS client/server pairs. // Each pair creates 4 goroutines: client reader/writer and server reader/writer. // The benchmark stresses concurrent reading and writing to the same connection. // Such pattern is used in net/http and net/rpc. b.StopTimer() P := runtime.GOMAXPROCS(0) N := b.N / P W := 1000 // Setup P client/server connections.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 04 14:46:19 UTC 2024 - 13.4K bytes - Viewed (0) -
test/fixedbugs/issue9110.go
// Scenario that used to leak arbitrarily many SudoG structs. // See golang.org/issue/9110. package main import ( "runtime" "runtime/debug" "sync" "time" ) func main() { runtime.GOMAXPROCS(1) debug.SetGCPercent(1000000) // only GC when we ask for GC var stats, stats1, stats2 runtime.MemStats release := func() {} for i := 0; i < 20; i++ { if i == 10 { // Should be warmed up by now.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 02 13:43:18 UTC 2016 - 1.7K bytes - Viewed (0) -
src/runtime/metrics/description.go
"the GC. Even if only one thread is running during the pause, this is " + "computed as GOMAXPROCS times the pause latency because nothing else " + "can be executing. This is the exact sum of samples in " + "/sched/pauses/total/gc:seconds if each sample is multiplied by " + "GOMAXPROCS at the time it is taken. This metric is an overestimate, " +
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Dec 06 17:59:12 UTC 2023 - 19.6K bytes - Viewed (0) -
test/closure.go
} g() c <- int(x8) c <- int(x64) c <- int(z) } x8 = 101 x64 = 201 f(500) } func newfunc() func(int) int { return func(x int) int { return x } } func main() { runtime.GOMAXPROCS(1) var fail bool go f() check([]int{1, 4, 5, 4}) a := accum(0) b := accum(1) go g(a, b) check([]int{2, 4, 6, 9}) go h() check([]int{100, 200, 101, 201, 500, 101, 201, 500})
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sun Jul 01 17:59:50 UTC 2012 - 1.7K bytes - Viewed (0) -
src/cmd/cgo/internal/testsanitizers/testdata/tsan13.go
import ( "io" "runtime" "runtime/pprof" "unsafe" ) func main() { runtime.SetCgoTraceback(0, unsafe.Pointer(C.tsanTraceback), nil, nil) pprof.StartCPUProfile(io.Discard) C.runThreads(C.int(runtime.GOMAXPROCS(0))) pprof.StopCPUProfile()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 12 11:59:56 UTC 2023 - 1.7K bytes - Viewed (0) -
test/fixedbugs/issue13160.go
) const N = 100000 func main() { // Allocate more Ps than processors. This raises // the chance that we get interrupted by the OS // in exactly the right (wrong!) place. p := runtime.NumCPU() runtime.GOMAXPROCS(2 * p) // Allocate some pointers. ptrs := make([]*int, p) for i := 0; i < p; i++ { ptrs[i] = new(int) } // Arena where we read and write pointers like crazy. collider := make([]*int, p)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 02 13:43:18 UTC 2016 - 1.5K bytes - Viewed (0) -
src/iter/pull_test.go
// the same 100 times in a row. This should be more than enough to // ensure all goroutines get a chance to run to completion (or to // some block point) for a small group of test goroutines. defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1)) c := 0 ng := runtime.NumGoroutine() for i := 0; i < 1000; i++ { nng := runtime.NumGoroutine() if nng == ng { c++ } else { c = 0 ng = nng }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:09:28 UTC 2024 - 10.2K bytes - Viewed (0) -
src/cmd/trace/threadgen.go
} // TODO(mknyszek): Consider modeling procs differently and have them be // transition to and from NotExist when GOMAXPROCS changes. We can emit // events for this to clearly delineate GOMAXPROCS changes. if viewerEv.Name != "" { ctx.Instant(viewerEv) } } func (g *threadGenerator) ProcRange(ctx *traceContext, ev *trace.Event) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 5.7K bytes - Viewed (0) -
src/testing/benchmark_test.go
iters := uint64(0) b.SetParallelism(3) b.RunParallel(func(pb *testing.PB) { atomic.AddUint32(&procs, 1) for pb.Next() { atomic.AddUint64(&iters, 1) } }) if want := uint32(3 * runtime.GOMAXPROCS(0)); procs != want { t.Errorf("got %v procs, want %v", procs, want) } if iters != uint64(b.N) { t.Errorf("got %v iters, want %v", iters, b.N) } }) } func TestRunParallelFail(t *testing.T) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 5.6K bytes - Viewed (0)