Results 51 - 60 of 83 for RLock (0.26 sec)
cmd/erasure-server-pool-decom.go
}

func (z *erasureServerPools) decommissionInBackground(ctx context.Context, idx int) error {
	pool := z.serverPools[idx]
	z.poolMetaMutex.RLock()
	pending := z.poolMeta.PendingBuckets(idx)
	z.poolMetaMutex.RUnlock()
	for _, bucket := range pending {
		z.poolMetaMutex.RLock()
		isDecommissioned := z.poolMeta.isBucketDecommissioned(idx, bucket.String())
		z.poolMetaMutex.RUnlock()
		if isDecommissioned {
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 42.2K bytes - Viewed (1)
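The snippet above holds z.poolMetaMutex only long enough to read the pending-bucket list, releasing it before the loop does any real work. A minimal standalone sketch of that read-briefly-then-act pattern with sync.RWMutex (the meta type and field names are invented for illustration, not MinIO's):

package main

import (
	"fmt"
	"sync"
)

// meta guards a slice of pending bucket names behind an RWMutex.
type meta struct {
	mu      sync.RWMutex
	pending []string
}

// snapshotPending copies the list under a short read lock so callers
// can iterate without holding the lock.
func (m *meta) snapshotPending() []string {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return append([]string(nil), m.pending...)
}

func main() {
	m := &meta{pending: []string{"bucket-a", "bucket-b"}}
	for _, b := range m.snapshotPending() {
		fmt.Println("decommissioning", b) // lock is no longer held here
	}
}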
cmd/iam-etcd-store.go
	return &IAMEtcdStore{
		iamCache:     newIamCache(),
		client:       client,
		usersSysType: usersSysType,
	}
}

func (ies *IAMEtcdStore) rlock() *iamCache {
	ies.RLock()
	return ies.iamCache
}

func (ies *IAMEtcdStore) runlock() {
	ies.RUnlock()
}

func (ies *IAMEtcdStore) lock() *iamCache {
	ies.Lock()
	return ies.iamCache
}
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 13.9K bytes - Viewed (0)
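IAMEtcdStore embeds a lock and wraps it in small rlock()/runlock() helpers that hand back the cache the lock protects. A hedged approximation of that shape (the store and iamCache types below are simplified stand-ins):

package main

import (
	"fmt"
	"sync"
)

type iamCache struct {
	users map[string]bool
}

// store embeds an RWMutex and exposes rlock/runlock helpers, mirroring
// the shape of IAMEtcdStore in the snippet above (greatly simplified).
type store struct {
	sync.RWMutex
	cache *iamCache
}

func (s *store) rlock() *iamCache { s.RLock(); return s.cache }
func (s *store) runlock()         { s.RUnlock() }

func main() {
	s := &store{cache: &iamCache{users: map[string]bool{"alice": true}}}
	c := s.rlock()
	fmt.Println(len(c.users), "users")
	s.runlock()
}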
cmd/erasure-server-pool-rebalance.go
}

const (
	rebalMetaName = "rebalance.bin"
	rebalMetaFmt  = 1
	rebalMetaVer  = 1
)

func (z *erasureServerPools) nextRebalBucket(poolIdx int) (string, bool) {
	z.rebalMu.RLock()
	defer z.rebalMu.RUnlock()
	r := z.rebalMeta
	if r == nil {
		return "", false
	}
	ps := r.PoolStats[poolIdx]
	if ps == nil {
		return "", false
	}
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 28.7K bytes - Viewed (0)
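nextRebalBucket takes the read lock for the whole lookup and relies on defer z.rebalMu.RUnlock() to cover every early return. A small sketch of the same defer-based read accessor (rebalState and its fields are illustrative, not the actual rebalance metadata):

package main

import (
	"fmt"
	"sync"
)

// rebalState is a stand-in for per-pool rebalance metadata.
type rebalState struct {
	buckets []string
}

type pools struct {
	mu    sync.RWMutex
	rebal *rebalState // may be nil when no rebalance is running
}

// nextBucket reads under RLock; the deferred RUnlock covers every return path.
func (p *pools) nextBucket() (string, bool) {
	p.mu.RLock()
	defer p.mu.RUnlock()
	if p.rebal == nil || len(p.rebal.buckets) == 0 {
		return "", false
	}
	return p.rebal.buckets[0], true
}

func main() {
	p := &pools{rebal: &rebalState{buckets: []string{"photos"}}}
	fmt.Println(p.nextBucket())
}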
internal/logger/audit.go
	auditTgts := AuditTargets()
	if len(auditTgts) == 0 {
		return
	}
	var entry audit.Entry
	if w != nil && r != nil {
		reqInfo := GetReqInfo(ctx)
		if reqInfo == nil {
			return
		}
		reqInfo.RLock()
		defer reqInfo.RUnlock()
		entry = internalAudit.ToEntry(w, r, reqClaims, xhttp.GlobalDeploymentID)
		// indicates all requests for this API call are inbound
		entry.Trigger = "incoming"
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 4.6K bytes - Viewed (0)
cmd/metrics-resource.go
			continue
		}
		updateDriveIOStats(dm.IOStats, latestStats, labels)
		latestDriveStats[d] = dm.IOStats
	}
	lastDriveStatsRefresh = time.Now().UTC()
	latestDriveStatsMu.Unlock()

	globalLocalDrivesMu.RLock()
	localDrives := cloneDrives(globalLocalDrivesMap)
	globalLocalDrivesMu.RUnlock()

	for _, d := range localDrives {
		di, err := d.DiskInfo(GlobalContext, DiskInfoOptions{})
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Fri Oct 10 18:57:03 UTC 2025 - 17.2K bytes - Viewed (0)
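Here the global drive map is cloned under a short read lock so the subsequent DiskInfo calls run without holding globalLocalDrivesMu across I/O. A minimal sketch of that clone-under-RLock, iterate-outside pattern (maps.Clone needs Go 1.21+; the drive type is made up):

package main

import (
	"fmt"
	"maps"
	"sync"
)

type drive struct{ path string }

var (
	drivesMu sync.RWMutex
	drives   = map[string]*drive{"/mnt/d1": {path: "/mnt/d1"}}
)

// cloneDrives copies the map under RLock so callers can range over the
// result while the lock is free for writers.
func cloneDrives() map[string]*drive {
	drivesMu.RLock()
	defer drivesMu.RUnlock()
	return maps.Clone(drives)
}

func main() {
	for path := range cloneDrives() {
		fmt.Println("querying disk info for", path) // no lock held here
	}
}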
cmd/local-locker.go
		}
	}
	// None found return false, perhaps entry removed in previous run.
	return false
}

func (l *localLocker) RLock(ctx context.Context, args dsync.LockArgs) (reply bool, err error) {
	if len(args.Resources) != 1 {
		return false, fmt.Errorf("internal error: localLocker.RLock called with more than one resource")
	}
	// If we have too many waiting, reject this at once.
	if l.waitMutex.Load() > lockMutexWaitLimit {
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 12K bytes - Viewed (0)
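In local-locker.go, RLock is not sync.RWMutex but a method of MinIO's local lock server: it validates its arguments and sheds load when too many callers are already queued. A rough, simplified sketch of that admission check (the toy locker type, waiter counter, and limit are assumptions, not the actual dsync implementation):

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
)

const maxWaiters = 1000 // illustrative limit

// locker is a toy read-lock service: one resource, bounded waiters.
type locker struct {
	mu      sync.RWMutex
	waiting atomic.Int32
}

// RLock grants a shared lock unless too many callers are already queued.
func (l *locker) RLock(ctx context.Context, resource string) (bool, error) {
	if resource == "" {
		return false, errors.New("internal error: empty resource")
	}
	if l.waiting.Load() > maxWaiters {
		return false, nil // reject at once instead of queueing
	}
	l.waiting.Add(1)
	defer l.waiting.Add(-1)
	l.mu.RLock()
	return true, nil
}

func (l *locker) RUnlock() { l.mu.RUnlock() }

func main() {
	var l locker
	ok, err := l.RLock(context.Background(), "bucket/object")
	fmt.Println(ok, err)
	if ok {
		l.RUnlock()
	}
}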
cmd/bucket-replication.go
	if p.ActiveWorkers() < maxWorkers {
		p.mu.RLock()
		workers := min(len(p.workers)+1, maxWorkers)
		existing := len(p.workers)
		p.mu.RUnlock()
		p.ResizeWorkers(workers, existing)
	}
	maxMRFWorkers := min(maxWorkers, MRFWorkerMaxLimit)
	if p.ActiveMRFWorkers() < maxMRFWorkers {
		p.mu.RLock()
		workers := min(p.mrfWorkerSize+1, maxMRFWorkers)
		p.mu.RUnlock()
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 118.2K bytes - Viewed (0)
prepare_stmt.go
func (db *PreparedStmtDB) Reset() {
	db.Close()
}

func (db *PreparedStmtDB) prepare(ctx context.Context, conn ConnPool, isTransaction bool, query string) (_ *stmt_store.Stmt, err error) {
	db.Mux.RLock()
	if db.Stmts != nil {
		if stmt, ok := db.Stmts.Get(query); ok && (!stmt.Transaction || isTransaction) {
			db.Mux.RUnlock()
			return stmt, stmt.Error()
		}
	}
	db.Mux.RUnlock()

	// retry
	db.Mux.Lock()
Registered: Sun Dec 28 09:35:17 UTC 2025 - Last Modified: Fri Apr 25 08:22:26 UTC 2025 - 5.7K bytes - Viewed (0)
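prepare checks the statement cache under a read lock first and only upgrades to the write lock, re-checking after the upgrade, when it misses: the usual RLock-then-Lock double-check. A minimal generic sketch of that pattern (a plain map stands in for GORM's stmt_store):

package main

import (
	"fmt"
	"sync"
)

type cache struct {
	mu    sync.RWMutex
	stmts map[string]string
}

// get returns the cached value, building it under the write lock only
// when the read-locked fast path misses; the map is re-checked after
// upgrading because another goroutine may have filled it in between.
func (c *cache) get(query string) string {
	c.mu.RLock()
	if v, ok := c.stmts[query]; ok {
		c.mu.RUnlock()
		return v
	}
	c.mu.RUnlock()

	c.mu.Lock()
	defer c.mu.Unlock()
	if v, ok := c.stmts[query]; ok { // retry: another goroutine may have prepared it
		return v
	}
	v := "prepared:" + query
	c.stmts[query] = v
	return v
}

func main() {
	c := &cache{stmts: map[string]string{}}
	fmt.Println(c.get("SELECT 1"))
	fmt.Println(c.get("SELECT 1")) // second call hits the read-locked fast path
}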
cmd/metacache-bucket.go
	b.updated = true
	return existing, nil
}

// cloneCaches will return a clone of all current caches.
func (b *bucketMetacache) cloneCaches() (map[string]metacache, map[string][]string) {
	b.mu.RLock()
	defer b.mu.RUnlock()
	dst := make(map[string]metacache, len(b.caches))
	maps.Copy(dst, b.caches)
	// Copy indexes
	dst2 := make(map[string][]string, len(b.cachesRoot))
	for k, v := range b.cachesRoot {
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 6.6K bytes - Viewed (0)
cmd/erasure-decode.go
	// if readTrigger is false, it implies previous disk.ReadAt() was successful and there is no need
	// to try reading the next disk.
	for readTrigger := range readTriggerCh {
		newBufLK.RLock()
		canDecode := p.canDecode(newBuf)
		newBufLK.RUnlock()
		if canDecode {
			break
		}
		if readerIndex == len(p.readers) {
			break
		}
		if !readTrigger {
			continue
		}
		wg.Add(1)
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Thu Aug 29 01:40:52 UTC 2024 - 9.5K bytes - Viewed (0)