Results 1 - 10 of 158 for Locker (0.04 seconds)
cmd/local-locker.go
Created: Sun Dec 28 19:28:13 GMT 2025 - Last Modified: Sun Sep 28 20:59:21 GMT 2025 - 12K bytes
internal/stmt_store/stmt_store.go
//   locker: A synchronization lock that is unlocked after initialization to avoid deadlocks.
// Returns:
//   *Stmt: A newly created statement object for executing SQL operations.
//   error: An error if the statement preparation fails.
New(ctx context.Context, key string, isTransaction bool, connPool ConnPool, locker sync.Locker) (*Stmt, error)
Created: Sun Dec 28 09:35:17 GMT 2025 - Last Modified: Sun Apr 27 06:05:16 GMT 2025 - 6K bytes
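The doc comment above describes a constructor that takes a sync.Locker and releases it once the statement is registered, so the slow prepare step does not hold the store's lock. Below is a minimal sketch of that shape using only the standard library; the store and statement types are illustrative stand-ins, not GORM's actual ones.

    package main

    import (
        "context"
        "fmt"
        "sync"
    )

    // stmt stands in for the *Stmt the search hit returns; illustrative only.
    type stmt struct {
        key      string
        prepared bool
    }

    // newStmt registers the statement while the caller-held locker is still locked,
    // then unlocks before the (potentially slow) prepare step, mirroring the
    // "unlocked after initialization to avoid deadlocks" behaviour described above.
    func newStmt(ctx context.Context, key string, locker sync.Locker, store map[string]*stmt) (*stmt, error) {
        s := &stmt{key: key}
        store[key] = s  // registration happens under the lock held by the caller
        locker.Unlock() // release early so other callers are not blocked while we prepare
        // ... prepare the statement here; this part must not need the lock ...
        s.prepared = true
        return s, ctx.Err()
    }

    func main() {
        var mu sync.Mutex
        store := map[string]*stmt{}
        mu.Lock() // caller acquires the lock, newStmt releases it
        s, err := newStmt(context.Background(), "SELECT 1", &mu, store)
        fmt.Println(s.prepared, err)
    }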
cmd/lock-rest-server-common_test.go
    TimeLastRefresh: UTCNow().UnixNano(),
}
locker.ll.lockMap["name"] = []lockRequesterInfo{
    lockRequesterInfo1,
    lockRequesterInfo2,
}
lri := locker.ll.lockMap["name"]
// test unknown uid
if locker.ll.removeEntry("name", dsync.LockArgs{
    Owner: "owner",
    UID:   "unknown-uid",
}, &lri) {
    t.Errorf("Expected %#v, got %#v", false, true)
}
if !locker.ll.removeEntry("name", dsync.LockArgs{
Created: Sun Dec 28 19:28:13 GMT 2025 - Last Modified: Wed Apr 09 14:28:39 GMT 2025 - 3.2K bytes
cmd/callhome.go
// Make sure only 1 callhome is running on the cluster.
locker := objAPI.NewNSLock(minioMetaBucket, "callhome/runCallhome.lock")
lkctx, err := locker.GetLock(ctx, callhomeLeaderLockTimeout)
if err != nil {
    // lock timedout means some other node is the leader,
    // cycle back return 'true'
    return true
}
ctx = lkctx.Context()
defer locker.Unlock(lkctx)
Created: Sun Dec 28 19:28:13 GMT 2025 - Last Modified: Fri Aug 29 02:39:48 GMT 2025 - 5.3K bytes
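The callhome excerpt is a leader-election-by-lock pattern: whichever node acquires the namespace lock runs the job, everyone else backs off and retries later; the background-newdisks-heal-ops.go hit below uses the same GetLock/Unlock shape for drive healing. A rough self-contained sketch of that control flow follows; only the call shape (GetLock, Context, Unlock) comes from the snippet, while the interface and function names here are invented for illustration.

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    // lockCtx and nsLocker are assumed stand-ins, not MinIO's actual types.
    type lockCtx struct{ ctx context.Context }

    func (l lockCtx) Context() context.Context { return l.ctx }

    type nsLocker interface {
        GetLock(ctx context.Context, timeout time.Duration) (lockCtx, error)
        Unlock(lockCtx)
    }

    // runOnce runs job only if this node wins the lock; on timeout it simply
    // reports "retry later", mirroring the callhome loop returning true.
    func runOnce(ctx context.Context, locker nsLocker, job func(context.Context)) bool {
        lkctx, err := locker.GetLock(ctx, 30*time.Second)
        if err != nil {
            // lock timed out: some other node is the leader, cycle back
            return true
        }
        defer locker.Unlock(lkctx)
        job(lkctx.Context())
        return true
    }

    // localLock is a toy single-process implementation so the sketch runs.
    type localLock struct{ held bool }

    func (l *localLock) GetLock(ctx context.Context, _ time.Duration) (lockCtx, error) {
        if l.held {
            return lockCtx{}, errors.New("lock timeout")
        }
        l.held = true
        return lockCtx{ctx: ctx}, nil
    }

    func (l *localLock) Unlock(lockCtx) { l.held = false }

    func main() {
        lk := &localLock{}
        fmt.Println(runOnce(context.Background(), lk, func(ctx context.Context) {
            fmt.Println("leader: running callhome-style job")
        }))
    }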
cmd/background-newdisks-heal-ops.go
    return nil
}
if !errors.Is(err, errUnformattedDisk) {
    return err
}
}

// Prevent parallel erasure set healing
locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-drive-healing/%d/%d", poolIdx, setIdx))
lkctx, err := locker.GetLock(ctx, newDiskHealingTimeout)
if err != nil {
    return fmt.Errorf("Healing of drive '%v' on %s pool, belonging to %s erasure set already in progress: %w",
Created: Sun Dec 28 19:28:13 GMT 2025 - Last Modified: Fri Aug 29 02:39:48 GMT 2025 - 16.5K bytes
internal/dsync/drwmutex.go
g := Granted{index: index}
if c == nil {
    log("dsync: nil locker\n")
    ch <- g
    return
}
var locked bool
var err error
if isReadLock {
    if locked, err = c.RLock(netLockCtx, args); err != nil {
        log("dsync: Unable to call RLock failed with %s for %#v at %s\n", err, args, c)
    }
} else {
    if locked, err = c.Lock(netLockCtx, args); err != nil {
Created: Sun Dec 28 19:28:13 GMT 2025 - Last Modified: Fri Aug 29 02:39:48 GMT 2025 - 20.3K bytes
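The drwmutex excerpt is the per-locker fan-out: one goroutine per remote locker attempts RLock or Lock and reports a Granted result on a channel, with a nil locker short-circuited. Here is a simplified, self-contained sketch of that fan-out/collect shape; the locker interface and result type are assumptions, not dsync's exact definitions.

    package main

    import (
        "context"
        "fmt"
        "sync"
    )

    // granted mimics the Granted{index: ...} result the snippet sends per locker.
    type granted struct {
        index  int
        locked bool
    }

    // netLocker is an assumed minimal stand-in for dsync's NetLocker.
    type netLocker interface {
        Lock(ctx context.Context) (bool, error)
    }

    // lockAll asks every locker concurrently and collects one granted per locker,
    // mirroring the channel-based fan-out in the excerpt.
    func lockAll(ctx context.Context, lockers []netLocker) []granted {
        ch := make(chan granted, len(lockers))
        var wg sync.WaitGroup
        for i, c := range lockers {
            wg.Add(1)
            go func(index int, c netLocker) {
                defer wg.Done()
                g := granted{index: index}
                if c == nil {
                    ch <- g // nil locker: report "not locked", as the excerpt does
                    return
                }
                if locked, err := c.Lock(ctx); err == nil {
                    g.locked = locked
                }
                ch <- g
            }(i, c)
        }
        wg.Wait()
        close(ch)
        var out []granted
        for g := range ch {
            out = append(out, g)
        }
        return out
    }

    type alwaysYes struct{}

    func (alwaysYes) Lock(context.Context) (bool, error) { return true, nil }

    func main() {
        fmt.Println(lockAll(context.Background(), []netLocker{alwaysYes{}, nil, alwaysYes{}}))
    }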
internal/lsync/lrwmutex_test.go
    }
}

func TestSimpleWriteLockTimedOut(t *testing.T) {
    locked := testSimpleWriteLock(t, time.Second)
    expected := false
    if locked != expected {
        t.Errorf("TestSimpleWriteLockTimedOut(): \nexpected %#v\ngot %#v", expected, locked)
    }
}

func testDualWriteLock(t *testing.T, duration time.Duration) (locked bool) {
    ctx := t.Context()
    lrwm := NewLRWMutex()
Created: Sun Dec 28 19:28:13 GMT 2025 - Last Modified: Sun Sep 28 20:59:21 GMT 2025 - 7.8K bytes
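The lsync tests above exercise a write lock whose acquisition can time out (the one-second testSimpleWriteLock attempt is expected to fail when the lock is already held). Below is a standard-library sketch of a lock attempt bounded by a timeout, using a channel-backed toy lock; it illustrates the behaviour the test asserts and does not reproduce NewLRWMutex itself.

    package main

    import (
        "fmt"
        "time"
    )

    // chanLock is a toy lock whose acquisition can be bounded by a timeout,
    // roughly the behaviour the lsync tests assert for LRWMutex write locks.
    type chanLock struct{ slot chan struct{} }

    func newChanLock() *chanLock {
        l := &chanLock{slot: make(chan struct{}, 1)}
        l.slot <- struct{}{} // one token = lock is free
        return l
    }

    // GetLock returns false if the lock could not be acquired within timeout.
    func (l *chanLock) GetLock(timeout time.Duration) bool {
        select {
        case <-l.slot:
            return true
        case <-time.After(timeout):
            return false
        }
    }

    func (l *chanLock) Unlock() { l.slot <- struct{}{} }

    func main() {
        l := newChanLock()
        fmt.Println(l.GetLock(time.Second)) // true: lock was free
        fmt.Println(l.GetLock(time.Second)) // false: times out, like TestSimpleWriteLockTimedOut expects
    }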
cmd/namespace-lock.go
}
nsLk.ref++
n.lockMap[resource] = nsLk
n.lockMapMutex.Unlock()

// Locking here will block (until timeout).
if readLock {
    locked = nsLk.GetRLock(ctx, opsID, lockSource, timeout)
} else {
    locked = nsLk.GetLock(ctx, opsID, lockSource, timeout)
}
if !locked {
    // We failed to get the lock
    // Decrement ref count since we failed to get the lock
    n.lockMapMutex.Lock()
    n.lockMap[resource].ref--
Created: Sun Dec 28 19:28:13 GMT 2025 - Last Modified: Sun Sep 28 20:59:21 GMT 2025 - 9.2K bytes
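The namespace-lock excerpt shows the bookkeeping around per-resource locks: the entry's reference count goes up before blocking on the lock and is decremented again if acquisition fails. A compact sketch of that ref-counted lock-map idea with standard-library types follows; the field and method names are illustrative, not MinIO's.

    package main

    import (
        "fmt"
        "sync"
    )

    // nsLock pairs a per-resource mutex with a reference count, similar in
    // spirit to the nsLk entries in the excerpt.
    type nsLock struct {
        mu  sync.Mutex
        ref int
    }

    type nsLockMap struct {
        mapMu   sync.Mutex
        lockMap map[string]*nsLock
    }

    // lock bumps the ref count, then blocks on the per-resource mutex; a real
    // implementation would also handle timeouts and decrement ref on failure,
    // as the excerpt does.
    func (n *nsLockMap) lock(resource string) *nsLock {
        n.mapMu.Lock()
        nsLk, ok := n.lockMap[resource]
        if !ok {
            nsLk = &nsLock{}
            n.lockMap[resource] = nsLk
        }
        nsLk.ref++
        n.mapMu.Unlock()

        nsLk.mu.Lock()
        return nsLk
    }

    // unlock releases the mutex and removes the entry once nobody references it.
    func (n *nsLockMap) unlock(resource string, nsLk *nsLock) {
        nsLk.mu.Unlock()
        n.mapMu.Lock()
        nsLk.ref--
        if nsLk.ref == 0 {
            delete(n.lockMap, resource)
        }
        n.mapMu.Unlock()
    }

    func main() {
        n := &nsLockMap{lockMap: map[string]*nsLock{}}
        lk := n.lock("bucket/object")
        fmt.Println("locked, refs:", lk.ref)
        n.unlock("bucket/object", lk)
        fmt.Println("entries left:", len(n.lockMap))
    }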
cmd/erasure-sets.go
go func(i int, endpoint Endpoint) {
    defer wg.Done()
    lk.Lock()
    // Only add lockers only one per endpoint and per erasure set.
    if locker, ok := erasureLockers[endpoint.Host]; ok && !lockerEpSet.Contains(endpoint.Host) {
        lockerEpSet.Add(endpoint.Host)
        s.erasureLockers[i] = append(s.erasureLockers[i], locker)
    }
    lk.Unlock()
}(i, endpoints.Endpoints[i*setDriveCount+j])
}
}
wg.Wait()
Created: Sun Dec 28 19:28:13 GMT 2025 - Last Modified: Fri Aug 29 02:39:48 GMT 2025 - 37K bytes
internal/dsync/dsync_test.go
    testDrwMutexRefreshInterval = 100 * time.Millisecond
)

// TestMain initializes the testing framework
func TestMain(m *testing.M) {
    startLockServers()

    // Initialize locker clients for dsync.
    var clnts []NetLocker
    for i := range nodes {
        clnts = append(clnts, newClient(nodes[i].URL))
    }

    ds = &Dsync{
        GetLockers: func() ([]NetLocker, string) { return clnts, uuid.New().String() },
Created: Sun Dec 28 19:28:13 GMT 2025 - Last Modified: Fri Aug 29 02:39:48 GMT 2025 - 10.8K bytes
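TestMain above wires dsync up by building one NetLocker client per lock server and handing the whole set to Dsync through a GetLockers callback. A stripped-down sketch of that injection shape is below; the interface and struct are assumptions modeled on the excerpt, not dsync's real definitions.

    package main

    import (
        "context"
        "fmt"
    )

    // netLocker is an assumed minimal version of the NetLocker clients the test builds.
    type netLocker interface {
        Lock(ctx context.Context, resource string) (bool, error)
    }

    // dsync mirrors the excerpt's pattern of injecting the locker set via a
    // callback, so tests can swap in however many clients they started.
    type dsync struct {
        GetLockers func() ([]netLocker, string)
    }

    // memLocker is a toy in-process client standing in for newClient(nodes[i].URL).
    type memLocker struct{ name string }

    func (m memLocker) Lock(context.Context, string) (bool, error) { return true, nil }

    func main() {
        var clnts []netLocker
        for i := 0; i < 4; i++ {
            clnts = append(clnts, memLocker{name: fmt.Sprintf("node-%d", i)})
        }
        ds := &dsync{
            GetLockers: func() ([]netLocker, string) { return clnts, "owner-id" },
        }
        lockers, owner := ds.GetLockers()
        fmt.Println(len(lockers), "lockers for owner", owner)
    }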