Results 41 - 50 of 607 for Oplock (0.85 sec)
-
internal/grid/muxserver.go
    } else {
        m.send(message{Op: OpDisconnectClientMux, MuxID: m.ID})
    }
    // Unlock, since we are calling deleteMux, which will call close - which will lock recvMu.
    if locked {
        m.recvMu.Unlock()
        defer m.recvMu.Lock()
    }
    m.parent.deleteMux(true, m.ID)
}

func (m *muxServer) send(msg message) {
    m.sendMu.Lock()
    defer m.sendMu.Unlock()
    msg.MuxID = m.ID
    msg.Seq = m.SendSeq
    m.SendSeq++
    if debugPrint {
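The comment explains why recvMu is dropped here: deleteMux eventually takes the same (non-reentrant) mutex, and the deferred Lock restores the caller's invariant that the lock is held on return. A minimal, self-contained sketch of that temporary-unlock idiom, with illustrative names rather than the MinIO types:

package main

import (
    "fmt"
    "sync"
)

type node struct {
    mu    sync.Mutex
    items map[int]string
}

// drop acquires mu itself, so callers must not already hold it.
func (n *node) drop(id int) {
    n.mu.Lock()
    defer n.mu.Unlock()
    delete(n.items, id)
}

// removeLocked is called with mu held. It temporarily releases the lock
// around drop (which would otherwise deadlock) and re-acquires it before
// returning, so the caller's "lock held" invariant still stands.
func (n *node) removeLocked(id int) {
    n.mu.Unlock()
    defer n.mu.Lock()
    n.drop(id)
}

func main() {
    n := &node{items: map[int]string{1: "a"}}
    n.mu.Lock()
    n.removeLocked(1)
    n.mu.Unlock()
    fmt.Println(n.items) // map[]
}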
-
internal/s3select/progress.go
    // This ensures that Close will block until Read has completed.
    // This allows another goroutine to close the reader.
    pr.closedMu.Lock()
    defer pr.closedMu.Unlock()
    if pr.closed {
        return 0, errors.New("progressReader: read after Close")
    }
    return pr.processedReader.Read(p)
}

func (pr *progressReader) Close() error {
    pr.closedMu.Lock()
    defer pr.closedMu.Unlock()
    if pr.closed {
        return nil
    }
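Because Read and Close serialize on the same mutex, Close blocks until an in-flight Read returns, and any later Read fails cleanly. A rough sketch of the same shape around a plain io.Reader (illustrative, not the s3select type):

package main

import (
    "errors"
    "fmt"
    "io"
    "strings"
    "sync"
)

type guardedReader struct {
    r        io.Reader
    closedMu sync.Mutex
    closed   bool
}

// Read holds closedMu for its full duration, so a concurrent Close
// blocks until the in-flight Read has returned.
func (g *guardedReader) Read(p []byte) (int, error) {
    g.closedMu.Lock()
    defer g.closedMu.Unlock()
    if g.closed {
        return 0, errors.New("guardedReader: read after Close")
    }
    return g.r.Read(p)
}

// Close is idempotent and safe to call from another goroutine.
func (g *guardedReader) Close() error {
    g.closedMu.Lock()
    defer g.closedMu.Unlock()
    if g.closed {
        return nil
    }
    g.closed = true
    return nil
}

func main() {
    g := &guardedReader{r: strings.NewReader("hello")}
    buf := make([]byte, 8)
    n, _ := g.Read(buf)
    _ = g.Close()
    _, err := g.Read(buf)
    fmt.Println(n, err) // 5 guardedReader: read after Close
}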
-
cmd/erasure-server-pool-rebalance.go
    lock := z.serverPools[0].NewNSLock(minioMetaBucket, rebalMetaName)
    lkCtx, err := lock.GetLock(ctx, globalOperationTimeout)
    if err != nil {
        rebalanceLogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err))
        return err
    }
    defer lock.Unlock(lkCtx)

    ctx = lkCtx.Context()

    noLockOpts := ObjectOptions{NoLock: true}
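GetLock takes the namespace write lock but gives up once globalOperationTimeout expires, and later work is tied to the returned lock context. A generic sketch of acquiring a lock under a context deadline with a channel-based mutex (an assumption-level illustration, not the dsync API):

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

// chanLock is a mutex that can be acquired under a context deadline.
type chanLock struct{ ch chan struct{} }

func newChanLock() *chanLock { return &chanLock{ch: make(chan struct{}, 1)} }

// lock blocks until the lock is free or ctx expires.
func (l *chanLock) lock(ctx context.Context) error {
    select {
    case l.ch <- struct{}{}:
        return nil
    case <-ctx.Done():
        return errors.New("failed to acquire write lock: " + ctx.Err().Error())
    }
}

func (l *chanLock) unlock() { <-l.ch }

func main() {
    l := newChanLock()
    ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
    defer cancel()
    if err := l.lock(ctx); err != nil {
        fmt.Println(err)
        return
    }
    defer l.unlock()
    fmt.Println("lock held; safe to update rebalance metadata")
}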
-
cmd/local-locker.go
    Source string // Contains line, function and filename requesting the lock.
    Group  bool   // indicates if it was a group lock.
    Owner  string // Owner represents the UUID of the owner who originally requested the lock.
    Quorum int    // Quorum represents the quorum required for this lock to be active.
    idx    int    `msg:"-"` // index of the lock in the lockMap.
}
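The unexported idx field carries a msg:"-" tag so the msgp code generator leaves it out of the wire format; it is in-memory bookkeeping only. The same convention shown with encoding/json standing in for the generated MessagePack code (field names are illustrative):

package main

import (
    "encoding/json"
    "fmt"
)

type lockInfo struct {
    Source string `json:"source"`
    Owner  string `json:"owner"`
    Quorum int    `json:"quorum"`
    // Idx is process-local bookkeeping; the "-" tag keeps it out of the
    // serialized form, the same role msg:"-" plays for msgp-generated code.
    Idx int `json:"-"`
}

func main() {
    b, _ := json.Marshal(lockInfo{Source: "caller.go:42", Owner: "uuid-1", Quorum: 3, Idx: 7})
    fmt.Println(string(b)) // Idx does not appear in the output
}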
-
cmd/erasure-object.go
            nsUnlocker()
        }
    }()

    // Acquire lock
    if !opts.NoLock {
        lock := er.NewNSLock(bucket, object)
        lkctx, err := lock.GetRLock(ctx, globalOperationTimeout)
        if err != nil {
            return nil, err
        }
        ctx = lkctx.Context()

        // Release lock when the metadata is verified, and reader
        // is ready to be read.
        //
        // This is possible to be lock free because
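The read lock is held only while the object's metadata is verified; the actual data transfer runs after it is released. A small sketch of that release-early pattern with sync.RWMutex (illustrative names, not the erasure code path):

package main

import (
    "fmt"
    "sync"
)

type store struct {
    mu   sync.RWMutex
    meta map[string]int // object name -> size
}

// stat verifies metadata under a shared lock and releases it before the
// caller starts the (potentially long) data read.
func (s *store) stat(name string) (int, error) {
    s.mu.RLock()
    size, ok := s.meta[name]
    s.mu.RUnlock()
    if !ok {
        return 0, fmt.Errorf("%s: not found", name)
    }
    return size, nil
}

func main() {
    s := &store{meta: map[string]int{"a.txt": 42}}
    size, err := s.stat("a.txt")
    fmt.Println(size, err) // 42 <nil>
}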
-
cmd/shared-lock.go
        for {
            select {
            case <-ctx.Done():
                return
            case <-lkctx.Context().Done():
                // The context of the lock is canceled, this can happen
                // if one lock lost quorum due to cluster instability
                // in that case, try to lock again.
                break keepLock
            case ld.lockContext <- lkctx:
                // Send the lock context to anyone asking for it
            }
        }
    }
}
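The loop hands the current lock context to any goroutine that asks for it, and breaks out of the labeled keepLock loop to re-acquire when the lock's own context is cancelled, for example after losing quorum. A compact sketch of that select shape, using a return value in place of the labeled break (illustrative, not the shared-lock implementation):

package main

import (
    "context"
    "fmt"
)

// serveLock hands out lockCtx to consumers until either the outer ctx or
// lockCtx itself is done. It reports whether the caller should re-acquire
// the lock and try again (the role of "break keepLock" in the snippet).
func serveLock(ctx, lockCtx context.Context, out chan<- context.Context) bool {
    for {
        select {
        case <-ctx.Done():
            return false // shutting down
        case <-lockCtx.Done():
            return true // lock lost (e.g. quorum), caller re-locks
        case out <- lockCtx:
            // a consumer picked up the current lock context
        }
    }
}

func main() {
    lockCtx, lockCancel := context.WithCancel(context.Background())
    out := make(chan context.Context)

    go func() {
        <-out        // a consumer takes the current lock context
        lockCancel() // simulate the lock losing quorum
    }()
    fmt.Println("re-acquire needed:", serveLock(context.Background(), lockCtx, out))
}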
-
internal/dsync/lock-args_gen.go
-
internal/dsync/lock-args_gen_test.go
-
internal/config/storageclass/storage-class.go
func (sCfg *Config) AvailabilityOptimized() bool {
    ConfigLock.RLock()
    defer ConfigLock.RUnlock()
    if !sCfg.initialized {
        return true
    }
    return sCfg.Optimize == "availability" || sCfg.Optimize == ""
}

// Update update storage-class with new config
func (sCfg *Config) Update(newCfg Config) {
    ConfigLock.Lock()
    defer ConfigLock.Unlock()
    sCfg.RRS = newCfg.RRS
    sCfg.Standard = newCfg.Standard
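Every accessor goes through the same RWMutex (a package-level ConfigLock in the original), so concurrent readers never observe a half-applied Update. A tiny usage sketch with readers and a writer racing safely (illustrative names):

package main

import (
    "fmt"
    "sync"
)

type cfg struct {
    mu       sync.RWMutex
    optimize string
}

func (c *cfg) availabilityOptimized() bool {
    c.mu.RLock()
    defer c.mu.RUnlock()
    return c.optimize == "availability" || c.optimize == ""
}

func (c *cfg) update(optimize string) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.optimize = optimize
}

func main() {
    c := &cfg{}
    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func() { defer wg.Done(); _ = c.availabilityOptimized() }() // concurrent readers
    }
    c.update("capacity") // exclusive writer
    wg.Wait()
    fmt.Println(c.availabilityOptimized()) // false
}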
-
cmd/erasure-server-pool-decom.go
    }

    // if no update is needed return right away.
    if !update {
        z.poolMetaMutex.Lock()
        z.poolMeta = meta
        z.poolMetaMutex.Unlock()
    } else {
        newMeta := newPoolMeta(z, meta)
        if err = newMeta.save(ctx, z.serverPools); err != nil {
            return err
        }
        z.poolMetaMutex.Lock()
        z.poolMeta = newMeta
        z.poolMetaMutex.Unlock()
    }

    pools := meta.returnResumablePools()
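Note the ordering in the else branch: the new pool metadata is persisted with save before the in-memory copy is swapped under poolMetaMutex, so readers only ever see metadata that already exists on disk. A small sketch of that persist-then-publish pattern (illustrative names):

package main

import (
    "fmt"
    "sync"
)

type poolMeta struct{ version int }

type pools struct {
    mu   sync.Mutex
    meta poolMeta
}

// save stands in for persisting the metadata to stable storage.
func save(m poolMeta) error {
    fmt.Println("persisted version", m.version)
    return nil
}

// publish persists newMeta first and only then swaps the in-memory copy
// under the mutex, so readers never see metadata that is not on disk.
func (p *pools) publish(newMeta poolMeta) error {
    if err := save(newMeta); err != nil {
        return err
    }
    p.mu.Lock()
    p.meta = newMeta
    p.mu.Unlock()
    return nil
}

func main() {
    p := &pools{}
    _ = p.publish(poolMeta{version: 2})
    fmt.Println("in-memory version:", p.meta.version)
}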