Results 11 - 20 of 22 for getBlocks (0.08 sec)
cmd/notification.go
        }, idx, *client.host)
    }
    return ng.Wait()
}

var errPeerNotReachable = errors.New("peer is not reachable")

// GetLocks - makes GetLocks RPC call on all peers.
func (sys *NotificationSys) GetLocks(ctx context.Context, r *http.Request) []*PeerLocks {
    locksResp := make([]*PeerLocks, len(sys.peerClients))
    g := errgroup.WithNErrs(len(sys.peerClients))
Last Modified: Mon Sep 09 16:58:30 UTC 2024 - 46.2K bytes
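The cmd/notification.go hit shows GetLocks being fanned out to every peer in parallel, with one slot in the result slice per peer. A minimal sketch of that fan-out shape, using the standard golang.org/x/sync/errgroup package and a hypothetical peerClient interface (both are illustrative assumptions, not MinIO's internal errgroup or types):

package peers

import (
    "context"

    "golang.org/x/sync/errgroup"
)

// peerClient is a hypothetical stand-in for a per-node RPC client.
type peerClient interface {
    GetLocks(ctx context.Context) (map[string][]string, error)
}

// collectLocks queries every peer concurrently; each goroutine writes only
// its own slice index, so the results slice needs no extra locking.
func collectLocks(ctx context.Context, clients []peerClient) ([]map[string][]string, error) {
    results := make([]map[string][]string, len(clients))
    g, gctx := errgroup.WithContext(ctx)
    for i, c := range clients {
        i, c := i, c // capture loop variables for the goroutine
        g.Go(func() error {
            locks, err := c.GetLocks(gctx)
            if err != nil {
                return err // first error cancels gctx for the remaining peers
            }
            results[i] = locks
            return nil
        })
    }
    if err := g.Wait(); err != nil {
        return nil, err
    }
    return results, nil
}

Unlike this fail-fast sketch, the errgroup.WithNErrs variant in the snippet appears to collect one error per peer, so a single unreachable peer (errPeerNotReachable) does not abort the other calls.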
cmd/admin-handlers.go
        if err != nil {
            writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
            return
        }
    }
    stale := r.Form.Get("stale") == "true" // list also stale locks
    peerLocks := globalNotificationSys.GetLocks(ctx, r)
    topLocks := topLockEntries(peerLocks, stale)
    // Marshal API response upto requested count.
    if len(topLocks) > count && count > 0 {
        topLocks = topLocks[:count]
    }
Last Modified: Fri Oct 04 11:32:32 UTC 2024 - 99.7K bytes
cmd/peer-rest-client.go
}

// Close - marks the client as closed.
func (client *peerRESTClient) Close() error {
    client.restClient.Close()
    return nil
}

// GetLocks - fetch older locks for a remote node.
func (client *peerRESTClient) GetLocks(ctx context.Context) (lockMap map[string][]lockRequesterInfo, err error) {
    resp, err := getLocksRPC.Call(ctx, client.gridConn(), grid.NewMSS())
    if err != nil || resp == nil {
Last Modified: Mon Sep 09 16:58:30 UTC 2024 - 26.1K bytes
cmd/erasure-multipart.go
Last Modified: Sun Sep 29 22:40:36 UTC 2024 - 44.7K bytes
cmd/erasure-healing.go
        Bucket:    bucket,
        Object:    object,
        VersionID: versionID,
        DiskCount: len(storageDisks),
    }
    if !opts.NoLock {
        lk := er.NewNSLock(bucket, object)
        lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
        if err != nil {
            return result, err
        }
        ctx = lkctx.Context()
        defer lk.Unlock(lkctx)
    }
    // Re-read when we have lock...
Last Modified: Wed Oct 02 17:50:41 UTC 2024 - 34.4K bytes
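The erasure-healing.go hit is the first of several results below (batch-rotate.go, background-newdisks-heal-ops.go, erasure-server-pool-rebalance.go, bucket-replication.go) that use the same namespace-lock pattern: acquire the lock with a timeout, continue with the lock's context, and defer Unlock with the same lock context. A rough sketch of that pattern against hypothetical interfaces (LockContext, NamespaceLocker, and withObjectLock are made-up names, not MinIO's API):

package locking

import (
    "context"
    "time"
)

// LockContext pairs an acquired lock with the context to use while it is
// held (a hypothetical shape, mirroring the snippets above).
type LockContext interface {
    Context() context.Context
}

// NamespaceLocker is a hypothetical interface for a bucket/object lock.
type NamespaceLocker interface {
    GetLock(ctx context.Context, timeout time.Duration) (LockContext, error)
    Unlock(lc LockContext)
}

// withObjectLock runs fn while holding the namespace lock for one object.
func withObjectLock(ctx context.Context, lk NamespaceLocker, timeout time.Duration, fn func(ctx context.Context) error) error {
    lkctx, err := lk.GetLock(ctx, timeout) // fail if the lock cannot be acquired in time
    if err != nil {
        return err
    }
    defer lk.Unlock(lkctx)     // always release with the same lock context
    return fn(lkctx.Context()) // do the work under the lock-scoped context
}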
cmd/batch-rotate.go
    versionSuspended := globalBucketVersioningSys.PrefixSuspended(srcBucket, srcObject)
    lock := api.NewNSLock(r.Bucket, objInfo.Name)
    lkctx, err := lock.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        return err
    }
    ctx = lkctx.Context()
    defer lock.Unlock(lkctx)
    opts := ObjectOptions{
        VersionID: objInfo.VersionID,
Last Modified: Wed Sep 18 17:59:03 UTC 2024 - 14.7K bytes
cmd/background-newdisks-heal-ops.go
            return err
        }
    }
    // Prevent parallel erasure set healing
    locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-drive-healing/%d/%d", poolIdx, setIdx))
    lkctx, err := locker.GetLock(ctx, newDiskHealingTimeout)
    if err != nil {
        return fmt.Errorf("Healing of drive '%v' on %s pool, belonging to %s erasure set already in progress: %w",
            disk, humanize.Ordinal(poolIdx+1), humanize.Ordinal(setIdx+1), err)
Last Modified: Sat Oct 26 09:58:27 UTC 2024 - 16.6K bytes
cmd/erasure-server-pool-rebalance.go
    rebalSaveStoppedAt
)

func (z *erasureServerPools) saveRebalanceStats(ctx context.Context, poolIdx int, opts rebalSaveOpts) error {
    lock := z.serverPools[0].NewNSLock(minioMetaBucket, rebalMetaName)
    lkCtx, err := lock.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        rebalanceLogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err))
        return err
    }
    defer lock.Unlock(lkCtx)
Last Modified: Fri Sep 06 13:20:19 UTC 2024 - 28.4K bytes
cmd/bucket-replication.go
    }
    // Lock the object name before starting replication operation.
    // Use separate lock that doesn't collide with regular objects.
    lk := objectAPI.NewNSLock(bucket, "/[replicate]/"+dobj.ObjectName)
    lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        globalReplicationPool.Get().queueMRFSave(dobj.ToMRFEntry())
        sendEvent(eventArgs{
            BucketName: bucket,
            Object: ObjectInfo{
                Bucket: bucket,
Last Modified: Thu Oct 10 06:49:55 UTC 2024 - 116.1K bytes
cmd/data-scanner.go
// The function will block until the context is canceled.
// There should only ever be one scanner running per cluster.
func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
    ctx, cancel := globalLeaderLock.GetLock(ctx)
    defer cancel()
    // Load current bloom cycle
    var cycleInfo currentScannerCycle
    buf, _ := readConfig(ctx, objAPI, dataUsageBloomNamePath)
    if len(buf) == 8 {
Last Modified: Tue Oct 22 21:10:34 UTC 2024 - 48.4K bytes
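In the data-scanner.go hit, runDataScanner takes a cluster-wide leader lock so that only one scanner runs at a time: GetLock blocks until the lock is held and returns a derived context plus a cancel that gives it up. A toy, in-process sketch of that shape (the leaderLock type below is invented for illustration; the real lock in MinIO is distributed):

package leader

import (
    "context"
    "sync"
)

// leaderLock is a toy, single-process stand-in for a cluster leader lock.
type leaderLock struct {
    mu sync.Mutex
}

// GetLock blocks until the lock is held, then returns a context that is
// canceled when leadership is released, and a cancel that releases the lock.
func (l *leaderLock) GetLock(ctx context.Context) (context.Context, context.CancelFunc) {
    l.mu.Lock() // blocks until the previous holder calls the returned cancel
    lctx, cancel := context.WithCancel(ctx)
    var once sync.Once
    release := func() {
        once.Do(func() {
            cancel()
            l.mu.Unlock()
        })
    }
    return lctx, release
}

A caller would then mirror the snippet: ctx, cancel := lock.GetLock(ctx); defer cancel().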