Results 1 - 10 of 16 for GOMAXPROCS (0.64 sec)
internal/lsync/lrwmutex_test.go
        clocked <- true
        <-cunlock
        m.RUnlock()
        cdone <- true
    }
}

// Borrowed from rwmutex_test.go
func doTestParallelReaders(numReaders, gomaxprocs int) {
    runtime.GOMAXPROCS(gomaxprocs)
    m := NewLRWMutex()
    clocked := make(chan bool)
    cunlock := make(chan bool)
    cdone := make(chan bool)
    for range numReaders {
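Both this excerpt and the drwmutex one below pin GOMAXPROCS for the duration of a test. Worth noting: runtime.GOMAXPROCS(n) returns the previous value, so a test can restore the original setting when it finishes. A minimal, illustrative sketch of that idiom (not taken from the MinIO sources):

    package example_test

    import (
        "runtime"
        "testing"
    )

    // TestWithReducedProcs is illustrative only: it pins GOMAXPROCS to 1
    // for its body and restores the old value on exit, relying on the fact
    // that runtime.GOMAXPROCS(n) returns the previous setting.
    func TestWithReducedProcs(t *testing.T) {
        old := runtime.GOMAXPROCS(1) // set to 1, remember the previous value
        defer runtime.GOMAXPROCS(old)

        // Exercise code that must behave correctly on a single P.
        if got := runtime.GOMAXPROCS(0); got != 1 {
            t.Fatalf("expected GOMAXPROCS=1 inside the test, got %d", got)
        }
    }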
internal/dsync/drwmutex_test.go
        }
    }
    cdone <- true
}

// Borrowed from rwmutex_test.go
func hammerRWMutex(t *testing.T, gomaxprocs, numReaders, numIterations int) {
    t.Run(fmt.Sprintf("%d-%d-%d", gomaxprocs, numReaders, numIterations), func(t *testing.T) {
        resource := "test"
        runtime.GOMAXPROCS(gomaxprocs)
        // Number of active readers + 10000 * number of active writers.
        var activity int32
        cdone := make(chan bool)
internal/grid/benchmark_test.go
    errFatal(err)

    // Wait for all to connect

    // Parallel writes per server.
    b.Run("bytes", func(b *testing.B) {
        for par := 1; par <= 32; par *= 2 {
            b.Run("par="+strconv.Itoa(par*runtime.GOMAXPROCS(0)), func(b *testing.B) {
                defer timeout(60 * time.Second)()
                ctx, cancel := context.WithTimeout(b.Context(), 30*time.Second)
                defer cancel()
                b.ReportAllocs()
                b.SetBytes(int64(len(payload) * 2))
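The sub-benchmark names above encode par*runtime.GOMAXPROCS(0) because b.RunParallel runs b.SetParallelism(p) times GOMAXPROCS goroutines. A self-contained sketch of that relationship, with placeholder work instead of the grid payload writes:

    package example_test

    import (
        "runtime"
        "strconv"
        "sync/atomic"
        "testing"
    )

    // BenchmarkParallelism is illustrative only: RunParallel starts
    // SetParallelism(par) * GOMAXPROCS goroutines, which is why the grid
    // benchmark above encodes par*runtime.GOMAXPROCS(0) in its name.
    func BenchmarkParallelism(b *testing.B) {
        var counter int64
        for par := 1; par <= 4; par *= 2 {
            b.Run("par="+strconv.Itoa(par*runtime.GOMAXPROCS(0)), func(b *testing.B) {
                b.SetParallelism(par)
                b.RunParallel(func(pb *testing.PB) {
                    for pb.Next() {
                        atomic.AddInt64(&counter, 1) // placeholder work
                    }
                })
            })
        }
    }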
cmd/erasure.go
                }
                cache.replace(v.Name, v.Parent, v.Entry)
                cache.Info.LastUpdate = time.Now()
            }
        }
    }()

    // Restrict parallelism for disk usage scanner
    // upto GOMAXPROCS if GOMAXPROCS is < len(disks)
    maxProcs := runtime.GOMAXPROCS(0)
    if maxProcs < len(disks) {
        disks = disks[:maxProcs]
    }

    // Start one scanner per disk
    var wg sync.WaitGroup
    wg.Add(len(disks))
    for i := range disks {
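The capping pattern above, never starting more scanner goroutines than GOMAXPROCS, reduces to a short self-contained sketch; scanDisk and the disk names here are placeholders, not MinIO code:

    package main

    import (
        "fmt"
        "runtime"
        "sync"
    )

    // scanDisk stands in for the per-disk scanner body.
    func scanDisk(disk string) {
        fmt.Println("scanning", disk)
    }

    func main() {
        disks := []string{"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7"}

        // Cap parallelism at GOMAXPROCS: with fewer Ps than disks,
        // only the first GOMAXPROCS disks are scanned concurrently.
        maxProcs := runtime.GOMAXPROCS(0)
        if maxProcs < len(disks) {
            disks = disks[:maxProcs]
        }

        // One scanner goroutine per (remaining) disk.
        var wg sync.WaitGroup
        wg.Add(len(disks))
        for i := range disks {
            go func(disk string) {
                defer wg.Done()
                scanDisk(disk)
            }(disks[i])
        }
        wg.Wait()
    }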
cmd/dynamic-timeouts_test.go
    }
}

func TestDynamicTimeoutConcurrent(t *testing.T) {
    // Race test.
    timeout := newDynamicTimeout(time.Second, time.Millisecond)

    var wg sync.WaitGroup
    for i := 0; i < runtime.GOMAXPROCS(0); i++ {
        wg.Add(1)
        rng := rand.New(rand.NewSource(int64(i)))
        go func() {
            defer wg.Done()
            for range 100 {
                for range 100 {
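A common shape for this kind of race test: one goroutine per GOMAXPROCS, each with its own seeded rand.Rand so the RNG does not become a shared point of contention. A hedged, stdlib-only sketch (adjust is a placeholder for the dynamic-timeout logic):

    package example_test

    import (
        "math/rand"
        "runtime"
        "sync"
        "sync/atomic"
        "testing"
    )

    // TestConcurrentAdjust is illustrative: it hammers a shared value from
    // one goroutine per GOMAXPROCS, giving each goroutine its own rand.Rand
    // because a rand.Rand is not safe for concurrent use.
    func TestConcurrentAdjust(t *testing.T) {
        var shared int64
        adjust := func(d int64) { atomic.AddInt64(&shared, d) } // placeholder

        var wg sync.WaitGroup
        for i := 0; i < runtime.GOMAXPROCS(0); i++ {
            wg.Add(1)
            rng := rand.New(rand.NewSource(int64(i)))
            go func() {
                defer wg.Done()
                for range 100 {
                    adjust(rng.Int63n(10))
                }
            }()
        }
        wg.Wait()
    }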
internal/config/errors.go
"", "MINIO_API_REPLICATION_WORKERS: should be > 0", ) ErrInvalidTransitionWorkersValue = newErrFn( "Invalid value for transition workers", "", "MINIO_API_TRANSITION_WORKERS: should be >= GOMAXPROCS/2", ) ErrInvalidBatchKeyRotationWorkersWait = newErrFn( "Invalid value for batch key rotation workers wait", "Please input a non-negative duration", "keyrotation_workers_wait should be > 0ms", )
cmd/handler-api.go
        MaxLWorkers: t.replicationMaxLWorkers,
    }
}

func (t *apiConfig) getTransitionWorkers() int {
    t.mu.RLock()
    defer t.mu.RUnlock()
    if t.transitionWorkers <= 0 {
        return runtime.GOMAXPROCS(0) / 2
    }
    return t.transitionWorkers
}

func (t *apiConfig) isSyncEventsEnabled() bool {
    t.mu.RLock()
    defer t.mu.RUnlock()
    return t.syncEvents
}
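Read together with the MINIO_API_TRANSITION_WORKERS error text above ("should be >= GOMAXPROCS/2"), the accessor falls back to half the available Ps when nothing is configured. A small illustrative probe of that fallback; the numbers in the comment assume an 8-CPU default, not a guaranteed value:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        // GOMAXPROCS(0) only reads the current setting; it does not change it.
        procs := runtime.GOMAXPROCS(0)

        // Same fallback as above: a non-positive configured value means
        // "use GOMAXPROCS/2 workers", e.g. 4 workers when procs is 8.
        configured := 0 // pretend nothing was configured
        workers := configured
        if workers <= 0 {
            workers = procs / 2
        }
        fmt.Printf("GOMAXPROCS=%d -> transition workers=%d\n", procs, workers)
    }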
internal/event/targetlist.go
            TotalEvents: stats.totalEvents,
        }
    }
    return t
}

func (list *TargetList) startSendWorkers(workerCount int) {
    if workerCount == 0 {
        workerCount = runtime.GOMAXPROCS(0)
    }
    wk, err := workers.New(workerCount)
    if err != nil {
        panic(err)
    }
    for range workerCount {
        wk.Take()
        go func() {
            defer wk.Give()
            for {
                select {
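startSendWorkers sizes its pool at GOMAXPROCS when no count is given and uses the workers package's Take/Give pair as a semaphore. A stdlib-only sketch of the same bounded-pool idea, with a buffered channel standing in for Take/Give (this is the general pattern, not MinIO's workers package):

    package main

    import (
        "fmt"
        "runtime"
        "sync"
    )

    func main() {
        workerCount := 0
        if workerCount == 0 {
            workerCount = runtime.GOMAXPROCS(0) // same default as above
        }

        // Buffered channel as a counting semaphore: at most workerCount
        // goroutines hold a slot ("Take") at a time; releasing ("Give")
        // frees the slot for the next job.
        sem := make(chan struct{}, workerCount)
        var wg sync.WaitGroup

        for job := 0; job < 20; job++ {
            sem <- struct{}{} // Take
            wg.Add(1)
            go func(job int) {
                defer wg.Done()
                defer func() { <-sem }() // Give
                fmt.Println("processing job", job)
            }(job)
        }
        wg.Wait()
    }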
cmd/batch-rotate.go
                return false
            }
        }
        return true
    }

    workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_KEYROTATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
    if err != nil {
        return err
    }

    wk, err := workers.New(workerSize)
    if err != nil {
        // invalid worker size.
        return err
    }

    ctx, cancel := context.WithCancel(ctx)
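Here the worker count comes from an environment variable with a GOMAXPROCS/2 default; env.Get is MinIO's helper, and the stdlib equivalent is os.Getenv plus a fallback. A hedged sketch of that parse-with-default step:

    package main

    import (
        "fmt"
        "os"
        "runtime"
        "strconv"
    )

    func main() {
        // Default mirrors the snippet above: GOMAXPROCS/2 workers unless
        // _MINIO_BATCH_KEYROTATION_WORKERS overrides it.
        def := strconv.Itoa(runtime.GOMAXPROCS(0) / 2)

        val := os.Getenv("_MINIO_BATCH_KEYROTATION_WORKERS")
        if val == "" {
            val = def // stdlib stand-in for env.Get(key, default)
        }

        workerSize, err := strconv.Atoi(val)
        if err != nil {
            fmt.Fprintln(os.Stderr, "invalid worker size:", err)
            os.Exit(1)
        }
        fmt.Println("batch key-rotation workers:", workerSize)
    }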
doc/godebug.md
runtime will consider cgroup CPU limits when setting the default GOMAXPROCS. The default value `containermaxprocs=1` will use cgroup limits in addition to the total logical CPU count and CPU affinity. `containermaxprocs=0` will disable consideration of cgroup limits. This setting only affects Linux. Go 1.25 added a new `updatemaxprocs` setting that controls whether the Go runtime will periodically update GOMAXPROCS for new CPU affinity or cgroup
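These GODEBUG knobs can be observed directly, since the effective setting is whatever runtime.GOMAXPROCS(0) reports at startup. A small probe program, to be run with and without the settings described above (the binary name and any specific outputs are illustrative):

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        // NumCPU is the logical CPU count visible to the process;
        // GOMAXPROCS(0) is the runtime's current effective setting, which
        // on Linux may be lowered by cgroup CPU limits (containermaxprocs).
        fmt.Println("NumCPU:     ", runtime.NumCPU())
        fmt.Println("GOMAXPROCS: ", runtime.GOMAXPROCS(0))
    }

Inside a container limited to, say, 2 CPUs, comparing GODEBUG=containermaxprocs=1 ./probe (the documented default) against GODEBUG=containermaxprocs=0 ./probe shows whether the cgroup limit is reflected in GOMAXPROCS.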