Results 21 - 30 of 54 for RLock
cmd/lock-rest-server.go
    return l.makeResp(resp, err)
}

// RLockHandler - Acquires an RLock.
func (l *lockRESTServer) RLockHandler(args *dsync.LockArgs) (*dsync.LockResp, *grid.RemoteErr) {
    // Add a timeout similar to what we expect upstream.
    ctx, cancel := context.WithTimeout(context.Background(), dsync.DefaultTimeouts.Acquire)
    defer cancel()
    resp := lockRPCRLock.NewResponse()
    success, err := l.ll.RLock(ctx, *args)
    if err == nil && !success {
        err = errLockConflict
Last Modified: Fri Jan 31 19:54:34 UTC 2025 - 6.3K bytes
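The handler above bounds lock acquisition with a context deadline and maps a clean-but-unsuccessful acquire to errLockConflict. Below is a minimal standalone sketch of that pattern using only the standard library; tryRLock, acquireWithTimeout, and the 5-second budget are illustrative stand-ins, not MinIO's actual API.

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

var errLockConflict = errors.New("lock conflict")

// tryRLock stands in for a lock backend such as l.ll.RLock above;
// a real implementation would contact the lock server.
func tryRLock(ctx context.Context, resource string) (bool, error) {
    select {
    case <-ctx.Done():
        return false, ctx.Err()
    default:
        return true, nil
    }
}

// acquireWithTimeout bounds how long a caller may wait for the lock,
// mirroring the dsync.DefaultTimeouts.Acquire budget above.
func acquireWithTimeout(resource string) error {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) // budget is illustrative
    defer cancel()
    ok, err := tryRLock(ctx, resource)
    if err == nil && !ok {
        // The call returned cleanly but the lock was contended.
        err = errLockConflict
    }
    return err
}

func main() {
    fmt.Println(acquireWithTimeout("bucket/object"))
}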
cmd/site-replication-utils.go
            sm.resyncStatus[rs.ResyncID] = rs
        }
        sm.Unlock()
    }
    return nil
}

func (sm *siteResyncMetrics) report(dID string) *madmin.SiteResyncMetrics {
    sm.RLock()
    defer sm.RUnlock()
    rst, ok := sm.peerResyncMap[dID]
    if !ok {
        return nil
    }
    rs, ok := sm.resyncStatus[rst.resyncID]
    if !ok {
        return nil
    }
    m := madmin.SiteResyncMetrics{
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 8.8K bytes
cmd/site-replication.go
        duration := max(time.Duration(r.Float64()*float64(time.Minute)),
            // Make sure to sleep at least a second to avoid high CPU ticks.
            time.Second)
        time.Sleep(duration)
    }
    c.RLock()
    defer c.RUnlock()
    if c.enabled {
        logger.Info("Cluster replication initialized")
    }
    return nil
}

func (c *SiteReplicationSys) loadFromDisk(ctx context.Context, objAPI ObjectLayer) error {
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 184.7K bytes
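The retry loop above sleeps for a random duration of up to a minute, clamped to at least one second so the loop cannot busy-spin. A minimal sketch of that jitter, assuming Go 1.22+ for math/rand/v2 and the max builtin; jitteredSleep is an illustrative name.

package main

import (
    "math/rand/v2"
    "time"
)

// jitteredSleep waits a random duration in [1s, 1m), clamping the low
// end so a tight retry loop cannot spin the CPU.
func jitteredSleep() {
    d := max(time.Duration(rand.Float64()*float64(time.Minute)), time.Second)
    time.Sleep(d)
}

func main() {
    jitteredSleep()
}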
cmd/bucket-replication-metrics.go
    q.Lock()
    defer q.Unlock()
    q.srQueueStats.update()
    for _, s := range q.bucketStats {
        s.update()
    }
}

func (q *queueCache) getBucketStats(bucket string) InQueueMetric {
    q.RLock()
    defer q.RUnlock()
    v, ok := q.bucketStats[bucket]
    if !ok {
        return InQueueMetric{}
    }
    return InQueueMetric{
        Curr: QStat{Bytes: float64(v.nowBytes), Count: float64(v.nowCount)},
Last Modified: Thu Aug 15 12:04:40 UTC 2024 - 14.2K bytes
cmd/event-notification.go
        targetIDSet[k] = struct{}{}
    }
    evnot.targetList.Remove(targetIDSet)
}

// Send - sends the event to all registered notification targets
func (evnot *EventNotifier) Send(args eventArgs) {
    evnot.RLock()
    targetIDSet := evnot.bucketRulesMap[args.BucketName].Match(args.EventName, args.Object.Name)
    evnot.RUnlock()
    if len(targetIDSet) == 0 {
        return
    }
Last Modified: Fri Jun 21 22:22:24 UTC 2024 - 7.7K bytes
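Send holds the read lock only while matching rules against the in-memory map, releasing it before any delivery work. A minimal sketch of that discipline with sync.RWMutex; Notifier, rules, and the delivery loop are hypothetical stand-ins for the EventNotifier machinery.

package main

import (
    "fmt"
    "sync"
)

// Notifier is a hypothetical stand-in for EventNotifier above.
type Notifier struct {
    mu    sync.RWMutex
    rules map[string][]string // bucket -> target IDs
}

// Send holds the read lock only for the in-memory rule lookup,
// then releases it before doing any (potentially slow) delivery.
func (n *Notifier) Send(bucket, event string) {
    n.mu.RLock()
    targets := append([]string(nil), n.rules[bucket]...) // copy out under the lock
    n.mu.RUnlock()
    if len(targets) == 0 {
        return
    }
    for _, t := range targets {
        fmt.Printf("deliver %q to %s\n", event, t)
    }
}

func main() {
    n := &Notifier{rules: map[string][]string{"photos": {"webhook-1"}}}
    n.Send("photos", "s3:ObjectCreated:Put")
}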
cmd/api-router.go
)

func newHTTPServerFn() *xhttp.Server {
    globalObjLayerMutex.RLock()
    defer globalObjLayerMutex.RUnlock()
    return globalHTTPServer
}

func setHTTPServer(h *xhttp.Server) {
    globalObjLayerMutex.Lock()
    globalHTTPServer = h
    globalObjLayerMutex.Unlock()
}

func newConsoleServerFn() *consoleapi.Server {
    globalObjLayerMutex.RLock()
    defer globalObjLayerMutex.RUnlock()
    return globalConsoleSrv
}
Last Modified: Wed May 07 15:37:12 UTC 2025 - 23.3K bytes
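These accessors follow the classic read-mostly pattern: readers take RLock, the single writer takes the exclusive Lock. A minimal standalone sketch; srvMu, server, and the string payload are illustrative stand-ins for globalObjLayerMutex and *xhttp.Server.

package main

import (
    "fmt"
    "sync"
)

var (
    srvMu  sync.RWMutex
    server string // stand-in for the *xhttp.Server global
)

// getServer is safe for many concurrent readers.
func getServer() string {
    srvMu.RLock()
    defer srvMu.RUnlock()
    return server
}

// setServer takes the exclusive lock so no reader observes a torn update.
func setServer(s string) {
    srvMu.Lock()
    server = s
    srvMu.Unlock()
}

func main() {
    setServer("http-server-v2")
    fmt.Println(getServer())
}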
cmd/local-locker.go
        }
    }
    // None found return false, perhaps entry removed in previous run.
    return false
}

func (l *localLocker) RLock(ctx context.Context, args dsync.LockArgs) (reply bool, err error) {
    if len(args.Resources) != 1 {
        return false, fmt.Errorf("internal error: localLocker.RLock called with more than one resource")
    }
    // If we have too many waiting, reject this at once.
    if l.waitMutex.Load() > lockMutexWaitLimit {
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 12K bytes
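Before blocking, localLocker.RLock checks an atomic waiter count and rejects the request outright once lockMutexWaitLimit is exceeded. A minimal sketch of that back-pressure idea with sync/atomic; boundedLocker and the limit value are made up for illustration.

package main

import (
    "errors"
    "fmt"
    "sync"
    "sync/atomic"
)

const waitLimit = 100 // cf. lockMutexWaitLimit; the value here is made up

var errTooBusy = errors.New("too many waiters, rejecting lock request")

// boundedLocker fails fast when too many goroutines are already queued,
// instead of letting waiters pile up without bound.
type boundedLocker struct {
    mu      sync.Mutex
    waiting atomic.Int64
}

func (b *boundedLocker) lock() error {
    if b.waiting.Load() > waitLimit {
        return errTooBusy
    }
    b.waiting.Add(1)
    b.mu.Lock()
    b.waiting.Add(-1)
    return nil
}

func (b *boundedLocker) unlock() { b.mu.Unlock() }

func main() {
    var b boundedLocker
    if err := b.lock(); err == nil {
        defer b.unlock()
        fmt.Println("lock acquired")
    }
}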
cmd/erasure-server-pool-rebalance.go
}

const (
    rebalMetaName = "rebalance.bin"
    rebalMetaFmt  = 1
    rebalMetaVer  = 1
)

func (z *erasureServerPools) nextRebalBucket(poolIdx int) (string, bool) {
    z.rebalMu.RLock()
    defer z.rebalMu.RUnlock()
    r := z.rebalMeta
    if r == nil {
        return "", false
    }
    ps := r.PoolStats[poolIdx]
    if ps == nil {
        return "", false
    }
Last Modified: Thu Sep 04 20:47:24 UTC 2025 - 28.9K bytes
cmd/erasure-server-pool-decom.go
}

func (z *erasureServerPools) decommissionInBackground(ctx context.Context, idx int) error {
    pool := z.serverPools[idx]
    z.poolMetaMutex.RLock()
    pending := z.poolMeta.PendingBuckets(idx)
    z.poolMetaMutex.RUnlock()
    for _, bucket := range pending {
        z.poolMetaMutex.RLock()
        isDecommissioned := z.poolMeta.isBucketDecommissioned(idx, bucket.String())
        z.poolMetaMutex.RUnlock()
        if isDecommissioned {
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 42.1K bytes
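Note that the read lock is taken briefly before the loop and again inside each iteration, rather than being held across the whole long-running decommission. A minimal sketch of that short-critical-section style; meta, pending, and process are hypothetical names.

package main

import "sync"

// meta is a hypothetical stand-in for poolMeta above.
type meta struct {
    mu   sync.RWMutex
    done map[string]bool
}

// pending snapshots the work list under one short read lock.
func (m *meta) pending() []string {
    m.mu.RLock()
    defer m.mu.RUnlock()
    var out []string
    for b, d := range m.done {
        if !d {
            out = append(out, b)
        }
    }
    return out
}

// process re-checks each bucket under its own short read lock, so the
// lock is never held across the long-running per-bucket work and
// writers are not starved for the duration of the loop.
func (m *meta) process(work func(bucket string)) {
    for _, bucket := range m.pending() {
        m.mu.RLock()
        skip := m.done[bucket]
        m.mu.RUnlock()
        if skip {
            continue
        }
        work(bucket)
    }
}

func main() {
    m := &meta{done: map[string]bool{"alpha": false, "beta": true}}
    m.process(func(bucket string) { println("decommissioning", bucket) })
}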
cmd/bucket-replication.go
    if p.ActiveWorkers() < maxWorkers {
        p.mu.RLock()
        workers := min(len(p.workers)+1, maxWorkers)
        existing := len(p.workers)
        p.mu.RUnlock()
        p.ResizeWorkers(workers, existing)
    }
    maxMRFWorkers := min(maxWorkers, MRFWorkerMaxLimit)
    if p.ActiveMRFWorkers() < maxMRFWorkers {
        p.mu.RLock()
        workers := min(p.mrfWorkerSize+1, maxMRFWorkers)
        p.mu.RUnlock()
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 118K bytes