Results 21 - 30 of 207 for unlock (0.14 sec)
internal/pubsub/pubsub.go
}
ps.Lock()
defer ps.Unlock()
sub := &Sub[T]{ch: subCh, types: Mask(mask.Mask()), filter: filter}
ps.subs = append(ps.subs, sub)
// We hold a lock, so we are safe to update
combined := Mask(atomic.LoadUint64(&ps.types))
combined.Merge(Mask(mask.Mask()))
atomic.StoreUint64(&ps.types, uint64(combined))
go func() {
    <-doneCh
    ps.Lock()
    defer ps.Unlock()
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Tue Feb 06 16:57:30 UTC 2024 - 5.2K bytes - Viewed (0)
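
The excerpt above registers a subscriber while holding the pub/sub lock and mirrors the union of subscribed event types into an atomic word so publishers can check it without locking. A minimal sketch of that pattern, with illustrative names (PubSub, Sub, Subscribe) rather than MinIO's actual API:

package pubsubsketch

import (
    "sync"
    "sync/atomic"
)

type Sub struct {
    ch    chan string
    types uint64
}

type PubSub struct {
    sync.Mutex
    subs  []*Sub
    types uint64 // union of all subscriber masks; read atomically by publishers
}

// Subscribe registers ch for the event types in mask and removes the
// subscription when done is closed.
func (ps *PubSub) Subscribe(mask uint64, ch chan string, done <-chan struct{}) {
    ps.Lock()
    defer ps.Unlock()

    sub := &Sub{ch: ch, types: mask}
    ps.subs = append(ps.subs, sub)

    // We hold the lock, so it is safe to fold the new mask into the
    // combined word that publishers read lock-free.
    combined := atomic.LoadUint64(&ps.types) | mask
    atomic.StoreUint64(&ps.types, combined)

    go func() {
        <-done
        ps.Lock()
        defer ps.Unlock()
        for i, s := range ps.subs {
            if s == sub {
                ps.subs = append(ps.subs[:i], ps.subs[i+1:]...)
                break
            }
        }
    }()
}

Keeping the combined mask in an atomic lets the publish path skip the mutex when no subscriber is interested in an event type.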
cmd/bucket-metadata-sys.go
    buckets = buckets[count:]
}
sys.Lock()
sys.initialized = true
sys.Unlock()
if globalIsDistErasure {
    go sys.refreshBucketsMetadataLoop(ctx)
}
}

// Reset the state of the BucketMetadataSys.
func (sys *BucketMetadataSys) Reset() {
    sys.Lock()
    for k := range sys.metadataMap {
        delete(sys.metadataMap, k)
    }
    sys.Unlock()
}
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Wed Aug 28 15:32:18 UTC 2024 - 20.4K bytes - Viewed (0)
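
Reset here clears the cached bucket metadata map while holding the write lock. A compact sketch of the same idea, with illustrative names (metaCache, Set, Reset) rather than the BucketMetadataSys API:

package metacachesketch

import "sync"

type metaCache struct {
    sync.RWMutex
    initialized bool
    metadataMap map[string][]byte
}

func (c *metaCache) Set(bucket string, meta []byte) {
    c.Lock()
    defer c.Unlock()
    if c.metadataMap == nil {
        c.metadataMap = make(map[string][]byte)
    }
    c.metadataMap[bucket] = meta
    c.initialized = true
}

// Reset drops all cached entries but keeps the map allocated for reuse.
func (c *metaCache) Reset() {
    c.Lock()
    for k := range c.metadataMap {
        delete(c.metadataMap, k)
    }
    c.Unlock()
}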
internal/ringbuffer/ring_buffer.go
func (r *RingBuffer) Reset() {
    r.mu.Lock()
    defer r.mu.Unlock()
    // Set error so any readers/writers will return immediately.
    r.setErr(errors.New("reset called"), true)
    if r.block {
        r.readCond.Broadcast()
        r.writeCond.Broadcast()
    }
    // Unlock the mutex so readers/writers can finish.
    r.mu.Unlock()
    r.wg.Wait()
    r.mu.Lock()
    r.r = 0
    r.w = 0
    r.err = nil
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Wed May 15 00:11:04 UTC 2024 - 13.3K bytes - Viewed (0)
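
Reset above uses a lock handoff: poison the buffer with an error under the lock, wake blocked readers and writers, release the mutex so they can drain (tracked by a WaitGroup), then retake it and clear state so the deferred Unlock stays balanced. A minimal sketch of that pattern, assuming an illustrative buffer type rather than the actual ringbuffer internals:

package ringsketch

import (
    "errors"
    "sync"
)

type buffer struct {
    mu   sync.Mutex
    cond *sync.Cond // signalled when data or an error arrives
    wg   sync.WaitGroup
    data []byte
    err  error
}

func newBuffer() *buffer {
    b := &buffer{}
    b.cond = sync.NewCond(&b.mu)
    return b
}

func (b *buffer) Reset() {
    b.mu.Lock()
    defer b.mu.Unlock()

    // Set the error so any blocked readers/writers return immediately.
    b.err = errors.New("reset called")
    b.cond.Broadcast()

    // Release the mutex so in-flight readers/writers can finish...
    b.mu.Unlock()
    b.wg.Wait()

    // ...then retake it (keeping the deferred Unlock balanced) and clear state.
    b.mu.Lock()
    b.data = b.data[:0]
    b.err = nil
}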
cmd/lock-rest-client_test.go
// Attempt all calls.
_, err = lkClient.RLock(context.Background(), dsync.LockArgs{})
if err == nil {
    t.Fatal("Expected for Rlock to fail")
}
_, err = lkClient.Lock(context.Background(), dsync.LockArgs{})
if err == nil {
    t.Fatal("Expected for Lock to fail")
}
_, err = lkClient.RUnlock(context.Background(), dsync.LockArgs{})
if err == nil {
    t.Fatal("Expected for RUnlock to fail")
}
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Mon Jul 29 18:10:04 UTC 2024 - 2K bytes - Viewed (0)
cmd/bucket-replication-stats.go
    return ActiveWorkerStat{}
}
r.wlock.RLock()
defer r.wlock.RUnlock()
w := r.workers.get()
return ActiveWorkerStat{
    Curr: w.Curr,
    Max:  w.Max,
    Avg:  w.Avg,
}
}

func (r *ReplicationStats) collectWorkerMetrics(ctx context.Context) {
    if r == nil {
        return
    }
    for {
        select {
        case <-ctx.Done():
            return
        case <-r.wTimer.C:
            r.wlock.Lock()
            r.workers.update()
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Thu Aug 15 12:04:40 UTC 2024 - 13.6K bytes - Viewed (0)
internal/lock/lock.go
    *LockedFile
    mutex sync.Mutex
    refs  int // Holds read lock refs.
}

// IsClosed - Check if the rlocked file is already closed.
func (r *RLockedFile) IsClosed() bool {
    r.mutex.Lock()
    defer r.mutex.Unlock()
    return r.refs == 0
}

// IncLockRef - is used by called to indicate lock refs.
func (r *RLockedFile) IncLockRef() {
    r.mutex.Lock()
    r.refs++
    r.mutex.Unlock()
}
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Sun Jan 02 17:15:06 UTC 2022 - 2.5K bytes - Viewed (0)
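
The excerpt shows a reference-counted read-locked file: a small mutex guards the count, and the handle counts as closed once refs reaches zero. A short sketch of the idea with an illustrative rlockedFile type; the DecLockRef method here is an assumption added for symmetry, not part of the excerpt:

package locksketch

import (
    "os"
    "sync"
)

type rlockedFile struct {
    *os.File
    mutex sync.Mutex
    refs  int // holds read-lock references
}

// IsClosed reports whether all references have been released.
func (r *rlockedFile) IsClosed() bool {
    r.mutex.Lock()
    defer r.mutex.Unlock()
    return r.refs == 0
}

// IncLockRef adds one read-lock reference.
func (r *rlockedFile) IncLockRef() {
    r.mutex.Lock()
    r.refs++
    r.mutex.Unlock()
}

// DecLockRef (assumed helper) releases one reference and closes the file
// together with the last one.
func (r *rlockedFile) DecLockRef() error {
    r.mutex.Lock()
    defer r.mutex.Unlock()
    r.refs--
    if r.refs == 0 {
        return r.File.Close()
    }
    return nil
}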
internal/store/batch.go
func (b *Batch[I]) Add(item I) error {
    b.Lock()
    defer b.Unlock()
    if b.isFull() {
        if b.store == nil {
            return ErrBatchFull
        }
        // commit batch to store
        if err := b.commit(); err != nil {
            return err
        }
    }
    b.items = append(b.items, item)
    return nil
}

// Len returns the no of items in the batch
func (b *Batch[_]) Len() int {
    b.Lock()
    defer b.Unlock()
    return len(b.items)
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Sep 06 23:06:30 UTC 2024 - 2.9K bytes - Viewed (0)
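
Add appends under the batch mutex and, when the batch is full, either reports ErrBatchFull or commits before accepting the new item. A generic sketch of that shape, with an illustrative Batch type and a commit callback standing in for the real store:

package batchsketch

import (
    "errors"
    "sync"
)

var ErrBatchFull = errors.New("batch is full")

type Batch[I any] struct {
    sync.Mutex
    items  []I
    limit  int
    commit func([]I) error // nil means the caller drains the batch manually
}

func (b *Batch[I]) Add(item I) error {
    b.Lock()
    defer b.Unlock()

    if len(b.items) >= b.limit {
        if b.commit == nil {
            return ErrBatchFull
        }
        // Commit the full batch, then start a new one.
        if err := b.commit(b.items); err != nil {
            return err
        }
        b.items = nil
    }
    b.items = append(b.items, item)
    return nil
}

// Len returns the number of items currently buffered.
func (b *Batch[I]) Len() int {
    b.Lock()
    defer b.Unlock()
    return len(b.items)
}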
cmd/site-replication-utils.go
            }()
        }
    }
    wg.Wait()
    sm.Unlock()
}
    sTimer.Reset(siteResyncSaveInterval)
case <-ctx.Done():
    return
}
}
}

// update overall site resync state
func (sm *siteResyncMetrics) updateState(s SiteResyncStatus) error {
    if !globalSiteReplicationSys.isEnabled() {
        return nil
    }
    sm.Lock()
    defer sm.Unlock()
    switch s.Status {
    case ResyncStarted:
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Thu Jan 18 07:03:17 UTC 2024 - 8.9K bytes - Viewed (0)
cmd/namespace-lock_test.go
    nsLk.lock(ctx, "volume", "path", "source", "opsID", false, 1*time.Millisecond)
}()
time.Sleep(1 * time.Millisecond) // wait for goroutine to advance; ref=2
// Unlock the 1st lock; ref=1 after this line
nsLk.unlock("volume", "path", false)
// Taking another lockMapMutex here allows queuing up additional lockers. This should
// not be required but makes reproduction much easier.
nsLk.lockMapMutex.Lock()
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Apr 23 18:58:53 UTC 2021 - 3.1K bytes - Viewed (0)
cmd/background-newdisks-heal-ops.go
    h.mu.RLock()
    defer h.mu.RUnlock()
    return h.Object
}

func (h *healingTracker) setObject(object string) {
    h.mu.Lock()
    defer h.mu.Unlock()
    h.Object = object
}

func (h *healingTracker) updateProgress(success, skipped bool, bytes uint64) {
    h.mu.Lock()
    defer h.mu.Unlock()
    switch {
    case success:
        h.ItemsHealed++
        h.BytesDone += bytes
    case skipped:
        h.ItemsSkipped++
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Sat Oct 26 09:58:27 UTC 2024 - 16.6K bytes - Viewed (0)
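
The tracker above is the classic RWMutex accessor pattern: readers take RLock, writers take Lock, and related counters are updated inside one critical section. A brief illustrative sketch; tracker and its fields are stand-ins for healingTracker:

package healsketch

import "sync"

type tracker struct {
    mu sync.RWMutex

    Object       string
    ItemsHealed  uint64
    ItemsSkipped uint64
    BytesDone    uint64
}

// getObject takes only the read lock, so concurrent readers do not block each other.
func (t *tracker) getObject() string {
    t.mu.RLock()
    defer t.mu.RUnlock()
    return t.Object
}

func (t *tracker) setObject(object string) {
    t.mu.Lock()
    defer t.mu.Unlock()
    t.Object = object
}

// updateProgress bumps the related counters under one write lock so they stay consistent.
func (t *tracker) updateProgress(success, skipped bool, bytes uint64) {
    t.mu.Lock()
    defer t.mu.Unlock()
    switch {
    case success:
        t.ItemsHealed++
        t.BytesDone += bytes
    case skipped:
        t.ItemsSkipped++
    }
}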