Results 31 - 40 of 227 for unpack (0.17 sec)
internal/dsync/drwmutex.go
```go
	// Caller may use this as an indication to call again.
	return !checkFailedUnlocks(*locks, tolerance)
}

// Unlock unlocks the write lock.
//
// It is a run-time error if dm is not locked on entry to Unlock.
func (dm *DRWMutex) Unlock(ctx context.Context) {
	dm.m.Lock()
	dm.cancelRefresh()
	dm.m.Unlock()

	restClnts, owner := dm.clnt.GetLockers()

	// create temp array on stack
	locks := make([]string, len(restClnts))
```
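The doc comment states the same contract as the standard library's `sync.Mutex`: unlocking a mutex that is not locked is a run-time error. A minimal standalone sketch of that contract, using plain `sync.Mutex` as a stand-in (a real `DRWMutex` needs a dsync cluster to construct):

```go
package main

import "sync"

func main() {
	var mu sync.Mutex

	mu.Lock()
	mu.Unlock() // fine: mu was locked on entry

	// Calling mu.Unlock() again here would abort the program with
	// "fatal error: sync: unlock of unlocked mutex" - the same
	// "locked on entry" contract DRWMutex.Unlock documents above.
}
```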
internal/store/batch.go
```go
	b.Lock()
	defer b.Unlock()

	if b.isFull() {
		if b.store == nil {
			return ErrBatchFull
		}
		// commit batch to store
		if err := b.commit(); err != nil {
			return err
		}
	}

	b.items = append(b.items, item)
	return nil
}

// Len returns the number of items in the batch.
func (b *Batch[_]) Len() int {
	b.Lock()
	defer b.Unlock()
	return len(b.items)
}
```
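For illustration, a self-contained sketch of the same commit-on-full idiom with a generic batch; the type, fields, and `commit` callback below are hypothetical, not the actual `internal/store` API:

```go
package main

import (
	"fmt"
	"sync"
)

// batch mimics the pattern above: a mutex-guarded slice that is
// flushed via a commit callback once it reaches its limit.
type batch[T any] struct {
	sync.Mutex
	items  []T
	limit  int
	commit func([]T) error
}

func (b *batch[T]) Add(item T) error {
	b.Lock()
	defer b.Unlock()
	if len(b.items) >= b.limit {
		if err := b.commit(b.items); err != nil {
			return err
		}
		b.items = b.items[:0] // reuse the backing array after a commit
	}
	b.items = append(b.items, item)
	return nil
}

func main() {
	b := &batch[int]{limit: 2, commit: func(items []int) error {
		fmt.Println("committing", items)
		return nil
	}}
	for i := 0; i < 5; i++ {
		_ = b.Add(i)
	}
}
```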
cmd/local-locker.go
```go
	return concat(s, strconv.Itoa(idx))
}

func (l *localLocker) Unlock(_ context.Context, args dsync.LockArgs) (reply bool, err error) {
	if len(args.Resources) > maxDeleteList {
		return false, fmt.Errorf("internal error: localLocker.Unlock called with more than %d resources", maxDeleteList)
	}

	l.mutex.Lock()
	defer l.mutex.Unlock()
	err = nil

	for _, resource := range args.Resources {
```
okhttp-hpacktests/README.md
```markdown
OkHttp HPACK tests
==================

These tests use the [hpack-test-case][1] project to validate OkHttp's HPACK
implementation. The HPACK test cases are in a separate git submodule, so to
initialize them, you must run:

    git submodule init
    git submodule update

TODO
----

 * Add maven goal to avoid manual call to git submodule init.
 * Make hpack-test-case update itself from git, and run new tests.
```
cmd/admin-heal-ops.go
```go
			select {
			// Check after a second
			case <-time.After(time.Second):
				h.mutex.Unlock()
				continue
			case <-h.ctx.Done():
				h.mutex.Unlock()
				// discard result and return.
				return errHealStopSignalled
			// Timeout if no results consumed for too long.
			case <-unconsumedTimer.C:
				h.mutex.Unlock()
				return errHealIdleTimeout
			}
		}
		break
	}
```
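The three-way `select` above combines a periodic re-check, context cancellation, and an idle timer. A self-contained sketch of that shape, with illustrative names rather than MinIO's:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errIdleTimeout = errors.New("idle timeout") // illustrative sentinel

// waitLoop re-checks on a short tick, stops on cancellation, and
// gives up when the idle timer fires with nothing having reset it.
func waitLoop(ctx context.Context, done <-chan struct{}) error {
	idle := time.NewTimer(50 * time.Millisecond)
	defer idle.Stop()
	for {
		select {
		case <-time.After(10 * time.Millisecond):
			continue // re-check the condition after a tick
		case <-ctx.Done():
			return ctx.Err()
		case <-idle.C:
			return errIdleTimeout
		case <-done:
			return nil
		}
	}
}

func main() {
	// done is nil, so only the tick and the idle timer can fire.
	err := waitLoop(context.Background(), nil)
	fmt.Println(err) // idle timeout after ~50ms
}
```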
cmd/namespace-lock_test.go
```go
		defer close(lk2ch)
		nsLk.lock(ctx, "volume", "path", "source", "opsID", false, 1*time.Millisecond)
	}()
	time.Sleep(1 * time.Millisecond) // wait for goroutine to advance; ref=2

	// Unlock the 1st lock; ref=1 after this line
	nsLk.unlock("volume", "path", false)

	// Taking another lockMapMutex here allows queuing up additional lockers. This should
	// not be required but makes reproduction much easier.
	nsLk.lockMapMutex.Lock()
```
guava/src/com/google/common/util/concurrent/CycleDetectingLockFactory.java
```java
 *
 * <ul>
 *   <li>for an unnested {@code lock()} and {@code unlock()}, a cycle detecting lock takes 38ns as
 *       opposed to the 24ns taken by a plain lock.
 *   <li>for nested locking, the cost increases with the depth of the nesting:
 *       <ul>
 *         <li>2 levels: average of 64ns per lock()/unlock()
 *         <li>3 levels: average of 77ns per lock()/unlock()
 *         <li>4 levels: average of 99ns per lock()/unlock()
```
src/archive/zip/register.go
```go
	fw *flate.Writer
}

func (w *pooledFlateWriter) Write(p []byte) (n int, err error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.fw == nil {
		return 0, errors.New("Write after Close")
	}
	return w.fw.Write(p)
}

func (w *pooledFlateWriter) Close() error {
	w.mu.Lock()
	defer w.mu.Unlock()
	var err error
	if w.fw != nil {
		err = w.fw.Close()
		flateWriterPool.Put(w.fw)
		w.fw = nil
	}
	return err
```
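The `flateWriterPool.Put` call is the other half of a `sync.Pool` recycling pattern: expensive `*flate.Writer` state is reused across archives instead of being re-allocated. A self-contained sketch of that pattern (the pool variable and `compress` helper here are illustrative):

```go
package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"sync"
)

// Pool of flate writers; New is only called when the pool is empty.
var flateWriterPool = sync.Pool{
	New: func() any {
		fw, _ := flate.NewWriter(nil, flate.DefaultCompression)
		return fw
	},
}

func compress(data []byte) []byte {
	var buf bytes.Buffer
	fw := flateWriterPool.Get().(*flate.Writer)
	fw.Reset(&buf) // point the pooled writer at a fresh destination
	fw.Write(data)
	fw.Close()
	flateWriterPool.Put(fw) // recycle the compressor state
	return buf.Bytes()
}

func main() {
	out := compress([]byte("hello hello hello"))
	fmt.Println(len(out), "compressed bytes")
}
```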
internal/grid/muxserver.go
```go
	} else {
		m.send(message{Op: OpDisconnectClientMux, MuxID: m.ID})
	}
	// Unlock, since we are calling deleteMux, which will call close - which will lock recvMu.
	if locked {
		m.recvMu.Unlock()
		defer m.recvMu.Lock()
	}
	m.parent.deleteMux(true, m.ID)
}

func (m *muxServer) send(msg message) {
	m.sendMu.Lock()
	defer m.sendMu.Unlock()
	msg.MuxID = m.ID
	msg.Seq = m.SendSeq
	m.SendSeq++
	if debugPrint {
```
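The temporary-unlock idiom above (unlock, `defer` a re-lock, then call into code that takes the same mutex) can be shown in isolation. This sketch uses hypothetical names, not the grid package's API:

```go
package main

import (
	"fmt"
	"sync"
)

var mu sync.Mutex

// cleanup must not be called with mu held, since it takes mu itself.
func cleanup() {
	mu.Lock()
	defer mu.Unlock()
	fmt.Println("cleanup acquired the lock")
}

// closeLocked releases mu before calling out, and defers a re-lock so
// the caller sees the same locked state on return.
func closeLocked(locked bool) {
	if locked {
		mu.Unlock()
		defer mu.Lock()
	}
	cleanup() // would deadlock if mu were still held here
}

func main() {
	mu.Lock()
	closeLocked(true)
	mu.Unlock()
}
```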
internal/pubsub/pubsub.go
```go
	}

	ps.Lock()
	defer ps.Unlock()

	sub := &Sub[T]{ch: subCh, types: Mask(mask.Mask()), filter: filter}
	ps.subs = append(ps.subs, sub)

	// We hold a lock, so we are safe to update
	combined := Mask(atomic.LoadUint64(&ps.types))
	combined.Merge(Mask(mask.Mask()))
	atomic.StoreUint64(&ps.types, uint64(combined))

	go func() {
		<-doneCh

		ps.Lock()
		defer ps.Unlock()
		var remainTypes Mask
```
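A condensed sketch of this subscribe/auto-unsubscribe shape, with the atomic type-mask bookkeeping omitted; the names are illustrative, not the actual `internal/pubsub` API:

```go
package main

import (
	"fmt"
	"sync"
)

// pubSub registers subscriber channels and removes each one in a
// goroutine once its done channel closes, mirroring the pattern above.
type pubSub[T any] struct {
	sync.Mutex
	subs []chan T
}

func (ps *pubSub[T]) Subscribe(ch chan T, done <-chan struct{}) {
	ps.Lock()
	ps.subs = append(ps.subs, ch)
	ps.Unlock()

	go func() {
		<-done
		ps.Lock()
		defer ps.Unlock()
		for i, s := range ps.subs {
			if s == ch {
				ps.subs = append(ps.subs[:i], ps.subs[i+1:]...)
				break
			}
		}
	}()
}

func (ps *pubSub[T]) Publish(v T) {
	ps.Lock()
	defer ps.Unlock()
	for _, s := range ps.subs {
		select {
		case s <- v:
		default: // drop the event if the subscriber is slow
		}
	}
}

func main() {
	ps := &pubSub[string]{}
	ch := make(chan string, 1)
	done := make(chan struct{})
	ps.Subscribe(ch, done)
	ps.Publish("event")
	fmt.Println(<-ch)
	close(done)
}
```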