Results 1 - 8 of 8 for GetLock (0.11 sec)
internal/dsync/dsync_test.go
}
dm := NewDRWMutex(ds, "aap")
dm.refreshInterval = testDrwMutexRefreshInterval
ctx, cancel := context.WithCancel(context.Background())
if !dm.GetLock(ctx, cancel, id, source, Options{Timeout: 5 * time.Minute}) {
	t.Fatal("GetLock() should be successful")
}
// Make it run twice.
timer := time.NewTimer(testDrwMutexRefreshInterval * 2)
select {
case <-ctx.Done():
cmd/erasure-multipart.go
internal/dsync/drwmutex.go
	Timeout       time.Duration
	RetryInterval time.Duration
}

// GetLock tries to get a write lock on dm before the timeout elapses.
//
// If the lock is already in use, the calling go routine
// blocks until either the mutex becomes available and return success or
// more time has passed than the timeout value and return false.
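
Read together with the test excerpt from internal/dsync/dsync_test.go above, this is roughly how a caller acquires the distributed write lock. The following is only a sketch: the lock name, timeout and retry interval are placeholder values, and ds, id and source are assumed to be prepared the same way the test prepares them.

// Sketch only: assumes it runs alongside the dsync package, with ds, id and
// source set up as in dsync_test.go above; all values are illustrative.
dm := NewDRWMutex(ds, "example-resource")
ctx, cancel := context.WithCancel(context.Background())
defer cancel() // ctx/cancel are handed to GetLock exactly as in the test excerpt

ok := dm.GetLock(ctx, cancel, id, source, Options{
	Timeout:       30 * time.Second,
	RetryInterval: 100 * time.Millisecond,
})
if !ok {
	return // the lock could not be acquired before the timeout
}
// ... critical section; the matching release call is not shown in these excerpts ...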
cmd/batch-rotate.go
versionSuspended := globalBucketVersioningSys.PrefixSuspended(srcBucket, srcObject)
lock := api.NewNSLock(r.Bucket, objInfo.Name)
lkctx, err := lock.GetLock(ctx, globalOperationTimeout)
if err != nil {
	return err
}
ctx = lkctx.Context()
defer lock.Unlock(lkctx)
opts := ObjectOptions{
	VersionID: objInfo.VersionID,
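
The same acquire, re-scope the context, defer-unlock pattern recurs in the healing, drive-healing and rebalance excerpts below. Condensed to just the locking steps, and with stand-in bucket and object names, it looks like this (api, ctx and globalOperationTimeout are assumed to come from the caller, as in the excerpt above):

// Namespace-lock pattern from the excerpt above; bucket/object names are stand-ins.
lock := api.NewNSLock("example-bucket", "example-object")
lkctx, err := lock.GetLock(ctx, globalOperationTimeout)
if err != nil {
	return err // lock not acquired before the timeout
}
ctx = lkctx.Context()    // continue the operation under the lock's context
defer lock.Unlock(lkctx) // release on every return path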
cmd/erasure-healing.go
	Bucket:    bucket,
	Object:    object,
	VersionID: versionID,
	DiskCount: len(storageDisks),
}
if !opts.NoLock {
	lk := er.NewNSLock(bucket, object)
	lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return result, err
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx)
}
// Re-read when we have lock...
cmd/background-newdisks-heal-ops.go
		return err
	}
}

// Prevent parallel erasure set healing
locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-drive-healing/%d/%d", poolIdx, setIdx))
lkctx, err := locker.GetLock(ctx, newDiskHealingTimeout)
if err != nil {
	return fmt.Errorf("Healing of drive '%v' on %s pool, belonging to %s erasure set already in progress: %w",
		disk, humanize.Ordinal(poolIdx+1), humanize.Ordinal(setIdx+1), err)
cmd/erasure-server-pool-rebalance.go
	rebalSaveStoppedAt
)

func (z *erasureServerPools) saveRebalanceStats(ctx context.Context, poolIdx int, opts rebalSaveOpts) error {
	lock := z.serverPools[0].NewNSLock(minioMetaBucket, rebalMetaName)
	lkCtx, err := lock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		rebalanceLogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err))
		return err
	}
	defer lock.Unlock(lkCtx)
cmd/data-scanner.go
// The function will block until the context is canceled.
// There should only ever be one scanner running per cluster.
func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
	ctx, cancel := globalLeaderLock.GetLock(ctx)
	defer cancel()

	// Load current bloom cycle
	var cycleInfo currentScannerCycle
	buf, _ := readConfig(ctx, objAPI, dataUsageBloomNamePath)
	if len(buf) == 8 {
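
This last excerpt uses a different idiom: a cluster-wide leader lock taken once for the lifetime of the function. A reduced sketch of that idiom follows; the blocking behaviour of globalLeaderLock.GetLock and the effect of cancel are assumptions inferred from the excerpt's comments, not confirmed by these results.

// Sketch of the leader-lock idiom above. Assumption: GetLock blocks until this
// process holds the cluster-wide lock, and cancel gives it up when the task exits.
func runSingletonTask(ctx context.Context, objAPI ObjectLayer) {
	ctx, cancel := globalLeaderLock.GetLock(ctx)
	defer cancel()

	// ... long-running, cluster-singleton work using ctx and objAPI ...
	_ = ctx // placeholder so the sketch compiles
}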