Results 1 - 8 of 8 for GetLock (1.12 sec)
internal/dsync/dsync_test.go
    }
    dm := NewDRWMutex(ds, "aap")
    dm.refreshInterval = testDrwMutexRefreshInterval
    ctx, cancel := context.WithCancel(t.Context())
    if !dm.GetLock(ctx, cancel, id, source, Options{Timeout: 5 * time.Minute}) {
        t.Fatal("GetLock() should be successful")
    }
    // Make it run twice.
    timer := time.NewTimer(testDrwMutexRefreshInterval * 2)
    select {
    case <-ctx.Done():
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 10.8K bytes
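The test above exercises the core dsync contract: GetLock is handed the context's cancel function so that a failed background lock refresh can cancel in-flight work. A minimal sketch of that pattern, assuming it lives inside the minio module (internal/dsync is not importable from outside) and that ds, lockID, and source are already wired up; the helper name, lock name, and 30s timeout are illustrative, and Unlock taking a context is an assumption based on recent minio sources:

    import (
        "context"
        "time"

        "github.com/minio/minio/internal/dsync"
    )

    // withWriteLock runs critical under a distributed write lock.
    // A sketch of the usage pattern, not minio's own helper.
    func withWriteLock(ds *dsync.Dsync, lockID, source string, critical func(context.Context)) bool {
        dm := dsync.NewDRWMutex(ds, "my-resource")
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()
        // GetLock receives cancel so a lost lock (failed refresh) cancels ctx.
        if !dm.GetLock(ctx, cancel, lockID, source, dsync.Options{Timeout: 30 * time.Second}) {
            return false // not acquired within the timeout
        }
        defer dm.Unlock(context.Background()) // assumed signature, per recent sources
        critical(ctx) // long-running work should watch ctx.Done()
        return true
    }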
cmd/erasure-multipart.go
Last Modified: Fri Oct 24 04:05:31 UTC 2025 - 47.1K bytes
internal/dsync/drwmutex.go
    Timeout       time.Duration
    RetryInterval time.Duration
}

// GetLock tries to get a write lock on dm before the timeout elapses.
//
// If the lock is already in use, the calling goroutine blocks until
// either the mutex becomes available and GetLock returns true, or the
// timeout elapses and GetLock returns false.
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 20.3K bytes
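The semantics documented here — block until acquired or until the timeout, then report success as a bool — can be mimicked in a single process with a one-slot channel. A hypothetical standard-library-only analogue (not minio code; the real GetLock additionally retries across distributed lockers at RetryInterval):

    import "time"

    // timedMutex is a toy lock whose acquire has a deadline.
    type timedMutex struct{ slot chan struct{} }

    func newTimedMutex() *timedMutex {
        return &timedMutex{slot: make(chan struct{}, 1)}
    }

    // getLock returns true once the lock is held, false on timeout.
    func (m *timedMutex) getLock(timeout time.Duration) bool {
        t := time.NewTimer(timeout)
        defer t.Stop()
        select {
        case m.slot <- struct{}{}: // acquired the single slot
            return true
        case <-t.C: // waited longer than the timeout
            return false
        }
    }

    func (m *timedMutex) unlock() { <-m.slot }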
cmd/batch-rotate.go
    versionSuspended := globalBucketVersioningSys.PrefixSuspended(srcBucket, srcObject)
    lock := api.NewNSLock(r.Bucket, objInfo.Name)
    lkctx, err := lock.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        return err
    }
    ctx = lkctx.Context()
    defer lock.Unlock(lkctx)
    opts := ObjectOptions{
        VersionID: objInfo.VersionID,
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 14.7K bytes
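This result shows the namespace-lock idiom that repeats in the remaining hits: GetLock returns a LockContext, the caller continues on lkctx.Context() so downstream work is scoped to the lock's lifetime, and Unlock is deferred with the same LockContext. A sketch of that shape, assuming it sits inside minio's cmd package where RWLocker, LockContext, and globalOperationTimeout are visible; the helper name is hypothetical:

    // withNSLock wraps critical in acquire/release of a namespace lock.
    func withNSLock(ctx context.Context, lock RWLocker, critical func(context.Context) error) error {
        lkctx, err := lock.GetLock(ctx, globalOperationTimeout)
        if err != nil {
            return err // held elsewhere, or timed out waiting
        }
        // Always release with the LockContext that GetLock returned.
        defer lock.Unlock(lkctx)
        // Continue on the lock's context so cancellation propagates.
        return critical(lkctx.Context())
    }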
cmd/erasure-healing.go
        Bucket:    bucket,
        Object:    object,
        VersionID: versionID,
        DiskCount: len(storageDisks),
    }
    if !opts.NoLock {
        lk := er.NewNSLock(bucket, object)
        lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
        if err != nil {
            return result, err
        }
        ctx = lkctx.Context()
        defer lk.Unlock(lkctx)
    }
    // Re-read when we have lock...
Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 34.7K bytes
cmd/background-newdisks-heal-ops.go
            return err
        }
    }
    // Prevent parallel erasure set healing
    locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-drive-healing/%d/%d", poolIdx, setIdx))
    lkctx, err := locker.GetLock(ctx, newDiskHealingTimeout)
    if err != nil {
        return fmt.Errorf("Healing of drive '%v' on %s pool, belonging to %s erasure set already in progress: %w",
            disk, humanize.Ordinal(poolIdx+1), humanize.Ordinal(setIdx+1), err)
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 16.5K bytes
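Here the lock doubles as a concurrency guard: failing to acquire it within newDiskHealingTimeout is itself the signal that another node is already healing this erasure set, so the failure is reported rather than retried. A condensed sketch of the idiom (same cmd-package assumptions as above):

    // A lock-acquisition timeout means "someone else is on it".
    lkctx, err := locker.GetLock(ctx, newDiskHealingTimeout)
    if err != nil {
        // %w keeps the timeout error unwrappable via errors.Is/errors.As.
        return fmt.Errorf("already in progress: %w", err)
    }
    defer locker.Unlock(lkctx)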
cmd/erasure-server-pool-rebalance.go
    rebalSaveStoppedAt
)

func (z *erasureServerPools) saveRebalanceStats(ctx context.Context, poolIdx int, opts rebalSaveOpts) error {
    lock := z.serverPools[0].NewNSLock(minioMetaBucket, rebalMetaName)
    lkCtx, err := lock.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        rebalanceLogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err))
        return err
    }
    defer lock.Unlock(lkCtx)
Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 28.7K bytes
cmd/data-scanner.go
    // The function will block until the context is canceled.
    // There should only ever be one scanner running per cluster.
    func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
        ctx, cancel := globalLeaderLock.GetLock(ctx)
        defer cancel()

        // Load current bloom cycle
        var cycleInfo currentScannerCycle

        buf, _ := readConfig(ctx, objAPI, dataUsageBloomNamePath)
        if len(buf) == 8 {
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 45.5K bytes
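Note the different shape in this last result: the leader lock's GetLock takes only a context and blocks until this node holds the cluster-wide lock, returning a derived context plus its cancel func, which is how the scanner stays one-per-cluster. A hypothetical single-process analogue (leaderLock and becameLeader are stand-ins; the real implementation also keeps the lock refreshed in the background, which is omitted here):

    import "context"

    type leaderLock struct {
        becameLeader chan struct{} // closed once this node wins the lock
    }

    // GetLock blocks until leadership is held, then returns a context
    // the caller cancels when its singleton work stops.
    func (l *leaderLock) GetLock(ctx context.Context) (context.Context, context.CancelFunc) {
        <-l.becameLeader
        return context.WithCancel(ctx)
    }

    // Usage mirrors runDataScanner: acquire once, run until canceled.
    func runSingleton(ctx context.Context, l *leaderLock) {
        ctx, cancel := l.GetLock(ctx)
        defer cancel()
        <-ctx.Done() // the real scanner loops here until shutdown
    }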