Results 11 - 20 of 24 for NewTimer (0.1 sec)
cmd/tier.go
randInterval := func() time.Duration {
    return time.Duration(r.Float64() * 5 * float64(time.Second))
}

// To avoid all MinIO nodes reading the tier config object at the same
// time.
t := time.NewTimer(tierCfgRefresh + randInterval())
defer t.Stop()
for {
    select {
    case <-ctx.Done():
        return
    case <-t.C:
        err := config.Reload(ctx, objAPI)
        if err != nil {
            tierLogIf(ctx, err)
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Thu Sep 12 20:44:05 UTC 2024 - 15.7K bytes - Viewed (0) -
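The excerpt above is cut off before the timer is rearmed: time.NewTimer fires once, so loops like this one call Reset with fresh jitter at the end of each pass. A minimal self-contained sketch of the jittered-refresh pattern; jitteredRefresh, base, and the reload callback are illustrative names, not MinIO's API:

package main

import (
    "context"
    "log"
    "math/rand"
    "time"
)

// jitteredRefresh reloads on a base interval plus up to 5s of random
// jitter, so a fleet of nodes does not hit the shared config at once.
func jitteredRefresh(ctx context.Context, base time.Duration, reload func(context.Context) error) {
    r := rand.New(rand.NewSource(time.Now().UnixNano()))
    jitter := func() time.Duration {
        return time.Duration(r.Float64() * 5 * float64(time.Second))
    }

    t := time.NewTimer(base + jitter())
    defer t.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-t.C:
            if err := reload(ctx); err != nil {
                log.Println("reload:", err)
            }
            // NewTimer fires once; rearm with fresh jitter each pass.
            t.Reset(base + jitter())
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    jitteredRefresh(ctx, time.Second, func(context.Context) error {
        log.Println("config reloaded")
        return nil
    })
}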
cmd/background-newdisks-heal-ops.go
func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) {
    // Perform automatic disk healing when a disk is replaced locally.
    diskCheckTimer := time.NewTimer(defaultMonitorNewDiskInterval)
    defer diskCheckTimer.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-diskCheckTimer.C:
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Sat Oct 26 09:58:27 UTC 2024 - 16.6K bytes - Viewed (0) -
cmd/data-scanner.go
} else if len(buf) > 8 {
    cycleInfo.next = binary.LittleEndian.Uint64(buf[:8])
    buf = buf[8:]
    _, err := cycleInfo.UnmarshalMsg(buf)
    bugLogIf(ctx, err)
}

scannerTimer := time.NewTimer(scannerCycle.Load())
defer scannerTimer.Stop()
defer globalScannerMetrics.setCycle(nil)
for {
    select {
    case <-ctx.Done():
        return
    case <-scannerTimer.C:
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Tue Oct 22 21:10:34 UTC 2024 - 48.4K bytes - Viewed (0) -
doc/godebug.md
(synchronous), which makes correct use of the [`Timer.Stop`](/pkg/time/#Timer.Stop) and [`Timer.Reset`](/pkg/time/#Timer.Reset) method results much easier. The [`asynctimerchan` setting](/pkg/time/#NewTimer) disables this change. There are no runtime metrics for this change. This setting may be removed in a future release, Go 1.27 at the earliest.
Registered: Tue Nov 05 11:13:11 UTC 2024 - Last Modified: Mon Oct 28 14:46:33 UTC 2024 - 17.2K bytes - Viewed (0) -
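For context on what that godebug entry controls: before Go 1.23, a timer that had fired without being received from left a stale tick in its buffered channel, so a safe Reset had to be preceded by a Stop-and-drain. A small sketch contrasting the two styles; the asynctimerchan setting and the Stop/Reset semantics come from the doc above, while the durations are arbitrary:

package main

import (
    "fmt"
    "time"
)

func main() {
    t := time.NewTimer(50 * time.Millisecond)
    time.Sleep(100 * time.Millisecond) // let the timer fire unobserved

    // Pre-Go 1.23 idiom: with the old buffered timer channels, Stop
    // reported false after an undelivered fire and left a stale tick
    // in t.C that had to be drained before Reset. The non-blocking
    // drain below is correct under the old semantics and harmless
    // under the new synchronous ones.
    if !t.Stop() {
        select {
        case <-t.C:
        default:
        }
    }
    t.Reset(50 * time.Millisecond)

    // With Go 1.23's unbuffered timer channels (the default unless the
    // program runs with GODEBUG=asynctimerchan=1), Reset alone would
    // have been enough: no stale tick can sit in t.C.
    fmt.Println(<-t.C)
}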
internal/dsync/drwmutex.go
ctx, cancel := context.WithCancel(context.Background())

dm.m.Lock()
dm.cancelRefresh = cancel
dm.m.Unlock()

go func() {
    defer cancel()

    refreshTimer := time.NewTimer(dm.refreshInterval)
    defer refreshTimer.Stop()

    for {
        select {
        case <-ctx.Done():
            return
        case <-refreshTimer.C:
            noQuorum, err := refreshLock(ctx, dm.clnt, id, source, quorum)
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Mon Sep 09 15:49:49 UTC 2024 - 20.4K bytes - Viewed (0) -
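The dsync excerpt combines the timer loop with a lifecycle trick: the refresher goroutine's cancel func is stashed on the mutex (dm.cancelRefresh) so a later unlock can stop it. A minimal sketch of that shape under hypothetical names (lease, start, release; refreshLock's quorum logic is reduced to a bool):

package main

import (
    "context"
    "log"
    "sync"
    "time"
)

// lease mimics the drwmutex shape: a background refresher goroutine
// whose cancel func is stored on the struct so release() can stop it.
type lease struct {
    mu     sync.Mutex
    cancel context.CancelFunc
}

func (l *lease) start(interval time.Duration, refresh func(context.Context) bool) {
    ctx, cancel := context.WithCancel(context.Background())
    l.mu.Lock()
    l.cancel = cancel
    l.mu.Unlock()

    go func() {
        defer cancel()
        t := time.NewTimer(interval)
        defer t.Stop()
        for {
            select {
            case <-ctx.Done():
                return
            case <-t.C:
                if !refresh(ctx) {
                    return // e.g. refresh lost quorum: stop trying
                }
                t.Reset(interval)
            }
        }
    }()
}

func (l *lease) release() {
    l.mu.Lock()
    if l.cancel != nil {
        l.cancel()
    }
    l.mu.Unlock()
}

func main() {
    l := &lease{}
    l.start(200*time.Millisecond, func(context.Context) bool {
        log.Println("lock refreshed")
        return true
    })
    time.Sleep(time.Second)
    l.release()
}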
cmd/erasure-server-pool-rebalance.go
// time.
r := rand.New(rand.NewSource(time.Now().UnixNano()))
randSleepFor := func() time.Duration {
    return 5*time.Second + time.Duration(float64(5*time.Second)*r.Float64())
}

timer := time.NewTimer(randSleepFor())
defer timer.Stop()

var (
    quit     bool
    traceMsg string
)

for {
    select {
    case rebalErr := <-doneCh:
        quit = true
        now := time.Now()
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Sep 06 13:20:19 UTC 2024 - 28.4K bytes - Viewed (0) -
cmd/bucket-replication.go
// PersistToDisk persists in-memory resync metadata stats to disk at periodic intervals
func (s *replicationResyncer) PersistToDisk(ctx context.Context, objectAPI ObjectLayer) {
    resyncTimer := time.NewTimer(resyncTimeInterval)
    defer resyncTimer.Stop()

    // For each bucket name, store the last timestamp of the
    // successful save of replication status in the backend disks.
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Thu Oct 10 06:49:55 UTC 2024 - 116.1K bytes - Viewed (0) -
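PersistToDisk is another single-shot-timer loop. As an aside on the design choice: when the period is fixed and iterations are cheap, time.Ticker expresses the same loop with less ceremony, while NewTimer+Reset (as in the excerpts above) wins when each wait is recomputed or a slow iteration should delay the next. A hedged sketch; persistLoop and save are illustrative names, not MinIO's:

package main

import (
    "context"
    "log"
    "time"
)

// persistLoop saves stats on a fixed period using a Ticker, which
// rearms itself automatically, unlike a one-shot Timer.
func persistLoop(ctx context.Context, interval time.Duration, save func() error) {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-ticker.C:
            if err := save(); err != nil {
                log.Println("persist:", err)
            }
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()
    persistLoop(ctx, time.Second, func() error {
        log.Println("resync stats saved")
        return nil
    })
}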
cmd/xl-storage-disk-id-check.go
}

// Offset checks a bit.
time.Sleep(time.Duration(rng.Int63n(int64(1 * time.Second))))

dctx, dcancel := context.WithCancel(ctx)
started := time.Now()
go func() {
    timeout := time.NewTimer(globalDriveConfig.GetMaxTimeout())
    select {
    case <-dctx.Done():
        if !timeout.Stop() {
            <-timeout.C
        }
    case <-timeout.C:
        spent := time.Since(started)
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Sat Oct 26 09:56:26 UTC 2024 - 34.5K bytes - Viewed (0) -
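This excerpt shows the classic Stop-and-drain idiom inside a watchdog. A self-contained sketch of the same shape; watch, work, and maxTimeout are stand-ins for the snippet's drive-check machinery and globalDriveConfig.GetMaxTimeout():

package main

import (
    "context"
    "fmt"
    "time"
)

// watch runs work under a watchdog: a goroutine waits for either the
// work's completion (dctx) or a deadline, cancelling the work if the
// deadline fires first.
func watch(ctx context.Context, maxTimeout time.Duration, work func(context.Context)) {
    dctx, dcancel := context.WithCancel(ctx)
    started := time.Now()

    go func() {
        timeout := time.NewTimer(maxTimeout)
        select {
        case <-dctx.Done():
            // Work won the race. With the pre-Go 1.23 buffered timer
            // channels, Stop reports false if the timer already fired,
            // and the unreceived tick must be drained, exactly as in
            // the excerpt above; under Go 1.23 Stop simply succeeds.
            if !timeout.Stop() {
                <-timeout.C
            }
        case <-timeout.C:
            fmt.Println("drive check exceeded deadline after", time.Since(started))
            dcancel()
        }
    }()

    work(dctx)
    dcancel()
}

func main() {
    watch(context.Background(), time.Second, func(ctx context.Context) {
        select {
        case <-ctx.Done(): // watchdog fired
        case <-time.After(100 * time.Millisecond): // simulated disk I/O
        }
    })
}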
cmd/batch-handlers.go
    go func() {
        jpool.resume(randomWait)
        jpool.cleanupReports(randomWait)
    }()
    return jpool
}

func (j *BatchJobPool) cleanupReports(randomWait func() time.Duration) {
    t := time.NewTimer(randomWait())
    defer t.Stop()
    for {
        select {
        case <-GlobalContext.Done():
            return
        case <-t.C:
            results := make(chan itemOrErr[ObjectInfo], 100)
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Oct 18 15:32:09 UTC 2024 - 62.2K bytes - Viewed (0) -
cmd/admin-handlers.go
        // Start profiling locally as well.
        prof, err := startProfiler(profiler)
        if err == nil {
            globalProfiler[profiler] = prof
        }
    }
    globalProfilerMu.Unlock()

    timer := time.NewTimer(duration)
    defer timer.Stop()
    for {
        select {
        case <-ctx.Done():
            // Stop remote profiles
            go globalNotificationSys.DownloadProfilingData(GlobalContext, io.Discard)

            // Stop local
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Oct 04 11:32:32 UTC 2024 - 99.7K bytes - Viewed (0)
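Unlike the periodic loops above, the profiling handler uses its timer as a one-shot duration gate: wait until either the requested profiling duration elapses or the context is canceled. A stripped-down sketch of that shape; runFor and stop are hypothetical names, and the comparison with time.After is the usual rationale for an explicit Timer here:

package main

import (
    "context"
    "fmt"
    "time"
)

// runFor blocks until d elapses or ctx is canceled, then invokes stop.
// Unlike time.After, the explicit Timer is released via the deferred
// Stop as soon as the context wins the race.
func runFor(ctx context.Context, d time.Duration, stop func()) {
    timer := time.NewTimer(d)
    defer timer.Stop()
    select {
    case <-ctx.Done():
    case <-timer.C:
    }
    stop()
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    go func() {
        time.Sleep(100 * time.Millisecond)
        cancel() // e.g. client aborts the profile early
    }()
    runFor(ctx, 10*time.Second, func() { fmt.Println("profiler stopped") })
}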