Results 81 - 90 of 178 for RUnlock (0.41 sec)
cmd/object-handlers.go
lkctx, err := lock.GetRLock(ctx, globalOperationTimeout)
if err != nil {
    writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
    return
}
ctx = lkctx.Context()
defer lock.RUnlock(lkctx)

getObjectNInfo := objectAPI.GetObjectNInfo

gopts := opts
gopts.NoLock = true // We already have a lock, we can live with it.
objInfo, err := getObjectInfo(ctx, bucket, object, gopts)
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Sat Oct 05 05:16:15 UTC 2024 - 117.4K bytes - Viewed (0)
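The MinIO snippet above follows the standard Go read-lock pattern: acquire a shared lock, defer its release, then perform the read. A minimal sketch of the same pattern with a plain sync.RWMutex (not MinIO's distributed lock API; the store type and get method are illustrative only):

package main

import (
    "fmt"
    "sync"
)

type store struct {
    mu   sync.RWMutex
    data map[string]string
}

func (s *store) get(key string) (string, bool) {
    s.mu.RLock()         // shared lock: many readers may hold it concurrently
    defer s.mu.RUnlock() // released on every return path
    v, ok := s.data[key]
    return v, ok
}

func main() {
    s := &store{data: map[string]string{"bucket/object": "contents"}}
    fmt.Println(s.get("bucket/object"))
}

Deferring the unlock immediately after a successful acquire is what keeps the lock balanced on early returns, which is why the handler defers lock.RUnlock(lkctx) right after GetRLock succeeds.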
internal/dsync/dsync_test.go
    dm.Unlock(context.Background())
}

func TestSimpleLockUnlockMultipleTimes(t *testing.T) {
    dm := NewDRWMutex(ds, "test")
    dm.Lock(id, source)
    time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond)
    dm.Unlock(context.Background())

    dm.Lock(id, source)
    time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond)
    dm.Unlock(context.Background())

    dm.Lock(id, source)
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Wed Jun 19 14:35:19 UTC 2024 - 11.1K bytes - Viewed (0)
guava-tests/test/com/google/common/util/concurrent/CycleDetectingLockFactoryTest.java
    Lock lockB = factory.newReentrantLock(OtherOrder.SECOND);

    lockA.lock();
    lockA.lock();
    lockB.lock();
    lockB.lock();
    lockA.unlock();
    lockA.unlock();
    lockB.unlock();
    lockB.unlock();
  }

  public void testExplicitOrdering_acquiringMultipleLocksWithSameRank() {
    CycleDetectingLockFactory.WithExplicitOrdering<OtherOrder> factory =
Registered: Fri Nov 01 12:43:10 UTC 2024 - Last Modified: Fri Oct 18 22:10:29 UTC 2024 - 16.1K bytes - Viewed (0)
android/guava-tests/test/com/google/common/util/concurrent/CycleDetectingLockFactoryTest.java
    Lock lockB = factory.newReentrantLock(OtherOrder.SECOND);

    lockA.lock();
    lockA.lock();
    lockB.lock();
    lockB.lock();
    lockA.unlock();
    lockA.unlock();
    lockB.unlock();
    lockB.unlock();
  }

  public void testExplicitOrdering_acquiringMultipleLocksWithSameRank() {
    CycleDetectingLockFactory.WithExplicitOrdering<OtherOrder> factory =
Registered: Fri Nov 01 12:43:10 UTC 2024 - Last Modified: Fri Oct 18 22:10:29 UTC 2024 - 16.1K bytes - Viewed (0)
internal/ringbuffer/ring_buffer.go
func (r *RingBuffer) Reset() {
    r.mu.Lock()
    defer r.mu.Unlock()

    // Set error so any readers/writers will return immediately.
    r.setErr(errors.New("reset called"), true)
    if r.block {
        r.readCond.Broadcast()
        r.writeCond.Broadcast()
    }

    // Unlock the mutex so readers/writers can finish.
    r.mu.Unlock()
    r.wg.Wait()
    r.mu.Lock()
    r.r = 0
    r.w = 0
    r.err = nil
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Wed May 15 00:11:04 UTC 2024 - 13.3K bytes - Viewed (0)
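Reset above briefly gives up a mutex it still holds via defer, so that blocked readers and writers can finish, then re-acquires it before clearing state. A minimal sketch of that release/drain/re-acquire pattern, assuming an illustrative counter type rather than the ringbuffer API:

package main

import (
    "fmt"
    "sync"
)

type counter struct {
    mu sync.Mutex
    wg sync.WaitGroup
    n  int
}

func (c *counter) Reset() {
    c.mu.Lock()
    defer c.mu.Unlock() // pairs with the re-Lock below, not the Lock above

    // ... in the real code, signal in-flight readers/writers to return ...

    c.mu.Unlock() // drop the lock so in-flight goroutines can finish
    c.wg.Wait()   // wait for them to call wg.Done
    c.mu.Lock()   // take the lock back before touching shared state again

    c.n = 0
}

func main() {
    c := &counter{}
    c.wg.Add(1)
    go func() {
        defer c.wg.Done()
        c.mu.Lock()
        c.n++
        c.mu.Unlock()
    }()
    c.Reset()
    fmt.Println(c.n)
}

The explicit Unlock pairs with the later Lock, and the deferred Unlock at the top pairs with that re-Lock, so every Lock still has exactly one matching Unlock.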
internal/store/batch.go
    b.Lock()
    defer b.Unlock()

    if b.isFull() {
        if b.store == nil {
            return ErrBatchFull
        }
        // commit batch to store
        if err := b.commit(); err != nil {
            return err
        }
    }

    b.items = append(b.items, item)
    return nil
}

// Len returns the number of items in the batch
func (b *Batch[_]) Len() int {
    b.Lock()
    defer b.Unlock()

    return len(b.items)
}
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Sep 06 23:06:30 UTC 2024 - 2.9K bytes - Viewed (0)
cmd/namespace-lock_test.go
        defer close(lk2ch)
        nsLk.lock(ctx, "volume", "path", "source", "opsID", false, 1*time.Millisecond)
    }()
    time.Sleep(1 * time.Millisecond) // wait for goroutine to advance; ref=2

    // Unlock the 1st lock; ref=1 after this line
    nsLk.unlock("volume", "path", false)

    // Taking another lockMapMutex here allows queuing up additional lockers. This should
    // not be required but makes reproduction much easier.
    nsLk.lockMapMutex.Lock()
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Apr 23 18:58:53 UTC 2021 - 3.1K bytes - Viewed (0)
guava/src/com/google/common/util/concurrent/CycleDetectingLockFactory.java
 *
 * <ul>
 *   <li>for an unnested {@code lock()} and {@code unlock()}, a cycle detecting lock takes 38ns as
 *       opposed to the 24ns taken by a plain lock.
 *   <li>for nested locking, the cost increases with the depth of the nesting:
 *       <ul>
 *         <li>2 levels: average of 64ns per lock()/unlock()
 *         <li>3 levels: average of 77ns per lock()/unlock()
 *         <li>4 levels: average of 99ns per lock()/unlock()
Registered: Fri Nov 01 12:43:10 UTC 2024 - Last Modified: Fri Dec 15 19:31:54 UTC 2023 - 35.9K bytes - Viewed (0)
src/archive/zip/register.go
    fw *flate.Writer
}

func (w *pooledFlateWriter) Write(p []byte) (n int, err error) {
    w.mu.Lock()
    defer w.mu.Unlock()
    if w.fw == nil {
        return 0, errors.New("Write after Close")
    }
    return w.fw.Write(p)
}

func (w *pooledFlateWriter) Close() error {
    w.mu.Lock()
    defer w.mu.Unlock()
    var err error
    if w.fw != nil {
        err = w.fw.Close()
        flateWriterPool.Put(w.fw)
        w.fw = nil
    }
    return err
Registered: Tue Nov 05 11:13:11 UTC 2024 - Last Modified: Fri Oct 13 18:36:46 UTC 2023 - 3.7K bytes - Viewed (0)
internal/grid/muxserver.go
    } else {
        m.send(message{Op: OpDisconnectClientMux, MuxID: m.ID})
    }
    // Unlock, since we are calling deleteMux, which will call close - which will lock recvMu.
    if locked {
        m.recvMu.Unlock()
        defer m.recvMu.Lock()
    }
    m.parent.deleteMux(true, m.ID)
}

func (m *muxServer) send(msg message) {
    m.sendMu.Lock()
    defer m.sendMu.Unlock()
    msg.MuxID = m.ID
    msg.Seq = m.SendSeq
    m.SendSeq++
    if debugPrint {
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Jun 07 15:51:52 UTC 2024 - 9.7K bytes - Viewed (0)
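The muxserver snippet above drops recvMu before calling deleteMux, because deleteMux eventually locks recvMu itself, and uses defer to take the lock back before returning to the caller. A minimal sketch of that pattern, with hypothetical cache/evict/clear names rather than the grid package's API:

package main

import (
    "fmt"
    "sync"
)

type cache struct {
    mu    sync.Mutex
    items map[int]string
}

// evict is called with c.mu already held by the caller. It must release the lock
// before calling clear (which locks c.mu itself) and re-acquire it before
// returning, so the caller's locking assumptions still hold.
func (c *cache) evict(id int) {
    c.mu.Unlock()     // give up the lock for the nested call
    defer c.mu.Lock() // restore it on the way out, as the caller expects
    c.clear(id)
}

func (c *cache) clear(id int) {
    c.mu.Lock()
    defer c.mu.Unlock()
    delete(c.items, id)
}

func main() {
    c := &cache{items: map[int]string{1: "x"}}
    c.mu.Lock()
    c.evict(1)
    c.mu.Unlock()
    fmt.Println(len(c.items))
}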