Results 1 - 10 of 288 for Locker
cmd/local-locker.go
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 12K bytes - Viewed (0)
internal/stmt_store/stmt_store.go
// locker: A synchronization lock that is unlocked after initialization to avoid deadlocks.
// Returns:
//   *Stmt: A newly created statement object for executing SQL operations.
//   error: An error if the statement preparation fails.
New(ctx context.Context, key string, isTransaction bool, connPool ConnPool, locker sync.Locker) (*Stmt, error)
Registered: Sun Sep 07 09:35:13 UTC 2025 - Last Modified: Sun Apr 27 06:05:16 UTC 2025 - 6K bytes - Viewed (0)
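The doc comment above describes a constructor that receives an already-held sync.Locker and releases it once the shared entry is initialized, so slow statement preparation does not block other callers. A minimal sketch of that idea; the names stmtCache and newStmt are hypothetical stand-ins, not the real stmt_store types:

package main

import (
    "context"
    "fmt"
    "sync"
    "time"
)

type stmt struct{ key string }

type stmtCache struct {
    mu    sync.Mutex
    stmts map[string]*stmt
}

// newStmt assumes locker is already locked by the caller. It publishes the
// map entry, unlocks, and only then performs the slow preparation step, so
// concurrent callers are neither blocked nor deadlocked behind it.
func (c *stmtCache) newStmt(ctx context.Context, key string, locker sync.Locker) (*stmt, error) {
    s := &stmt{key: key}
    c.stmts[key] = s
    locker.Unlock() // release before the potentially slow/blocking work

    select { // stand-in for driver-side statement preparation
    case <-time.After(10 * time.Millisecond):
    case <-ctx.Done():
        return nil, ctx.Err()
    }
    return s, nil
}

func main() {
    c := &stmtCache{stmts: map[string]*stmt{}}
    c.mu.Lock() // caller holds the lock while deciding to create the entry
    s, err := c.newStmt(context.Background(), "SELECT 1", &c.mu)
    fmt.Println(s.key, err)
}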
cmd/lock-rest-server-common_test.go
TimeLastRefresh: UTCNow().UnixNano(),
}
locker.ll.lockMap["name"] = []lockRequesterInfo{
    lockRequesterInfo1,
    lockRequesterInfo2,
}
lri := locker.ll.lockMap["name"]

// test unknown uid
if locker.ll.removeEntry("name", dsync.LockArgs{
    Owner: "owner",
    UID:   "unknown-uid",
}, &lri) {
    t.Errorf("Expected %#v, got %#v", false, true)
}
if !locker.ll.removeEntry("name", dsync.LockArgs{
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Wed Apr 09 14:28:39 UTC 2025 - 3.2K bytes - Viewed (0)
cmd/callhome.go
// Make sure only 1 callhome is running on the cluster.
locker := objAPI.NewNSLock(minioMetaBucket, "callhome/runCallhome.lock")
lkctx, err := locker.GetLock(ctx, callhomeLeaderLockTimeout)
if err != nil {
    // lock timedout means some other node is the leader,
    // cycle back return 'true'
    return true
}
ctx = lkctx.Context()
defer locker.Unlock(lkctx)
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 5.3K bytes - Viewed (0)
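Here NewNSLock is MinIO's cluster-wide namespace lock, so a lock timeout means another node is already the leader and this node simply yields. A single-process analogue of that try-lock-or-yield pattern, using sync.Mutex.TryLock (Go 1.18+) instead of the dsync-backed NSLock; leaderLock and runTask are illustrative names:

package main

import (
    "context"
    "errors"
    "fmt"
    "sync"
    "time"
)

var errLockTimeout = errors.New("lock acquire timed out")

type leaderLock struct{ mu sync.Mutex }

// getLock polls TryLock until it succeeds or the timeout elapses,
// mirroring GetLock(ctx, timeout) returning an error on contention.
func (l *leaderLock) getLock(ctx context.Context, timeout time.Duration) error {
    deadline := time.Now().Add(timeout)
    for !l.mu.TryLock() {
        if time.Now().After(deadline) {
            return errLockTimeout
        }
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(5 * time.Millisecond):
        }
    }
    return nil
}

func runTask(ctx context.Context, l *leaderLock) bool {
    if err := l.getLock(ctx, 50*time.Millisecond); err != nil {
        // Another worker holds the lock and is the leader; cycle back.
        return true
    }
    defer l.mu.Unlock()
    fmt.Println("elected leader, running task")
    return true
}

func main() {
    var l leaderLock
    runTask(context.Background(), &l)
}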
cmd/background-newdisks-heal-ops.go
        return nil
    }
    if !errors.Is(err, errUnformattedDisk) {
        return err
    }
}

// Prevent parallel erasure set healing
locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-drive-healing/%d/%d", poolIdx, setIdx))
lkctx, err := locker.GetLock(ctx, newDiskHealingTimeout)
if err != nil {
    return fmt.Errorf("Healing of drive '%v' on %s pool, belonging to %s erasure set already in progress: %w",
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 16.5K bytes - Viewed (0)
cmd/namespace-lock.go
}
nsLk.ref++
n.lockMap[resource] = nsLk
n.lockMapMutex.Unlock()

// Locking here will block (until timeout).
if readLock {
    locked = nsLk.GetRLock(ctx, opsID, lockSource, timeout)
} else {
    locked = nsLk.GetLock(ctx, opsID, lockSource, timeout)
}

if !locked {
    // We failed to get the lock
    // Decrement ref count since we failed to get the lock
    n.lockMapMutex.Lock()
    n.lockMap[resource].ref--
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Sun Sep 29 22:40:36 UTC 2024 - 9.2K bytes - Viewed (0)
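The fragment shows the bookkeeping around MinIO's per-resource lock map: the map mutex guards only the refcount and the map entry, while the resource lock itself is taken outside it so one contended resource cannot stall lookups for others. A stripped-down, blocking-only sketch of that structure, assuming illustrative names (nsLockMap, nsLock) rather than MinIO's exact types:

package main

import (
    "fmt"
    "sync"
)

type nsLock struct {
    ref int
    mu  sync.Mutex
}

type nsLockMap struct {
    mapMu   sync.Mutex
    lockMap map[string]*nsLock
}

func (n *nsLockMap) lock(resource string) {
    n.mapMu.Lock()
    nsLk, ok := n.lockMap[resource]
    if !ok {
        nsLk = &nsLock{}
        n.lockMap[resource] = nsLk
    }
    nsLk.ref++ // count waiters so the entry is not deleted under us
    n.mapMu.Unlock()

    nsLk.mu.Lock() // may block; the map mutex is already released
}

func (n *nsLockMap) unlock(resource string) {
    n.mapMu.Lock()
    defer n.mapMu.Unlock()
    nsLk := n.lockMap[resource]
    nsLk.mu.Unlock()
    if nsLk.ref--; nsLk.ref == 0 {
        delete(n.lockMap, resource) // last holder removes the entry
    }
}

func main() {
    n := &nsLockMap{lockMap: map[string]*nsLock{}}
    n.lock("bucket/object")
    fmt.Println("locked")
    n.unlock("bucket/object")
    fmt.Println("unlocked")
}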
cmd/erasure-sets.go
go func(i int, endpoint Endpoint) {
    defer wg.Done()
    lk.Lock()
    // Only add lockers only one per endpoint and per erasure set.
    if locker, ok := erasureLockers[endpoint.Host]; ok && !lockerEpSet.Contains(endpoint.Host) {
        lockerEpSet.Add(endpoint.Host)
        s.erasureLockers[i] = append(s.erasureLockers[i], locker)
    }
    lk.Unlock()
}(i, endpoints.Endpoints[i*setDriveCount+j])
    }
}
wg.Wait()
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 37K bytes - Viewed (1)
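The goroutines above race to register lockers, so a mutex plus a host set guarantees at most one locker per endpoint host. The same dedup guard in isolation, with made-up host strings in place of MinIO's Endpoint type:

package main

import (
    "fmt"
    "sync"
)

func main() {
    hosts := []string{"a:9000", "b:9000", "a:9000", "c:9000", "b:9000"}
    var (
        mu   sync.Mutex
        seen = map[string]bool{}
        out  []string
        wg   sync.WaitGroup
    )
    for _, h := range hosts {
        wg.Add(1)
        go func(host string) {
            defer wg.Done()
            mu.Lock()
            // Only add one locker per endpoint host.
            if !seen[host] {
                seen[host] = true
                out = append(out, host)
            }
            mu.Unlock()
        }(h)
    }
    wg.Wait()
    fmt.Println(out) // three unique hosts, in nondeterministic order
}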
cmd/erasure.go
// getLockers returns list of remote and local lockers.
getLockers func() ([]dsync.NetLocker, string)

// getEndpoints returns list of endpoint belonging this set.
// some may be local and some remote.
getEndpoints func() []Endpoint

// getEndpoints returns list of endpoint strings belonging this set.
// some may be local and some remote.
getEndpointStrings func() []string

// Locker mutex map.
nsMutex *nsLockMap
}
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 16.1K bytes - Viewed (0)
cmd/admin-handlers.go
var args dsync.LockArgs
var lockers []dsync.NetLocker
for path := range strings.SplitSeq(vars["paths"], ",") {
    if path == "" {
        continue
    }
    args.Resources = append(args.Resources, path)
}
for _, lks := range z.serverPools[0].erasureLockers {
    lockers = append(lockers, lks...)
}
for _, locker := range lockers {
    locker.ForceUnlock(ctx, args)
}
}
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 99.6K bytes - Viewed (0)
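The handler parses a comma-separated "paths" parameter and fans one LockArgs out to every known locker (strings.SplitSeq, Go 1.24+, is the iterator form of strings.Split). A standalone sketch of that parse-and-fan-out step; the locker interface and printLocker type are stand-ins, not dsync.NetLocker:

package main

import (
    "context"
    "fmt"
    "strings"
)

type lockArgs struct{ Resources []string }

type locker interface {
    ForceUnlock(ctx context.Context, args lockArgs)
}

type printLocker struct{ name string }

func (p printLocker) ForceUnlock(_ context.Context, args lockArgs) {
    fmt.Println(p.name, "force-unlock", args.Resources)
}

func main() {
    var args lockArgs
    for _, path := range strings.Split("bucket/a,,bucket/b", ",") {
        if path == "" {
            continue // skip empty segments from stray commas
        }
        args.Resources = append(args.Resources, path)
    }
    // Fan the same args out to every locker.
    lockers := []locker{printLocker{"node1"}, printLocker{"node2"}}
    for _, lk := range lockers {
        lk.ForceUnlock(context.Background(), args)
    }
}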
cmd/storage-rest-server.go
} else {
    write([]byte{0})
}
xioutil.SafeClose(doneCh)
return
}
defer xioutil.SafeClose(doneCh)

// Initiate ticker after body has been read.
ticker := time.NewTicker(time.Second * 10)
defer ticker.Stop()

for {
    select {
    case <-ticker.C:
        // The done() might have been called
        // concurrently, check for it before we
        // write the filler byte.
        select {
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Tue May 27 15:19:03 UTC 2025 - 45.7K bytes - Viewed (0)
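This excerpt keeps a long-running HTTP response alive by writing a filler byte on each tick until the operation signals completion, re-checking the done channel first so no filler byte lands after the final response byte. A self-contained version of that loop, with a plain channel and callback standing in for MinIO's types:

package main

import (
    "fmt"
    "time"
)

func keepAlive(done <-chan struct{}, write func(b byte)) {
    ticker := time.NewTicker(100 * time.Millisecond)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            // done may fire concurrently; re-check before writing
            // the filler byte.
            select {
            case <-done:
                return
            default:
            }
            write(0) // filler byte tells the client we are still alive
        case <-done:
            return
        }
    }
}

func main() {
    done := make(chan struct{})
    go func() {
        time.Sleep(350 * time.Millisecond) // stand-in for the long operation
        close(done)
    }()
    keepAlive(done, func(b byte) { fmt.Printf("filler %d\n", b) })
}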