Results 1 - 10 of 99 for lockers (0.09 sec)

  1. cmd/namespace-lock_test.go

    		// Unlock the 1st lock; ref=1 after this line
    		nsLk.unlock("volume", "path", false)
    
    		// Taking another lockMapMutex here allows queuing up additional lockers. This should
    		// not be required but makes reproduction much easier.
    		nsLk.lockMapMutex.Lock()
    
    		// lk3 blocks.
    		lk3ch := make(chan bool)
    		go func() {
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Fri Aug 29 02:39:48 UTC 2025
    - 3K bytes
    - Viewed (0)
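
    The test above exercises reference counting in MinIO's namespace lock map: each unlock drops the ref count, and queued waiters keep the entry alive until the count reaches zero. A minimal sketch of that refcounted-entry idea, with hypothetical names and none of nsLockMap's actual details:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // refLock is a hypothetical refcounted lock entry: mu serializes
    // holders, ref counts holders plus queued waiters.
    type refLock struct {
    	ref int64
    	mu  sync.Mutex
    }

    type lockMap struct {
    	mu    sync.Mutex // guards the entry table below
    	locks map[string]*refLock
    }

    func (m *lockMap) lock(resource string) {
    	m.mu.Lock()
    	l, ok := m.locks[resource]
    	if !ok {
    		l = &refLock{}
    		m.locks[resource] = l
    	}
    	l.ref++ // register this waiter before blocking
    	m.mu.Unlock()
    	l.mu.Lock()
    }

    func (m *lockMap) unlock(resource string) {
    	m.mu.Lock()
    	defer m.mu.Unlock()
    	l := m.locks[resource]
    	l.mu.Unlock()
    	l.ref--
    	if l.ref == 0 {
    		delete(m.locks, resource) // last reference removes the entry
    	}
    }

    func main() {
    	m := &lockMap{locks: map[string]*refLock{}}
    	m.lock("volume/path")
    	fmt.Println("ref after lock:", m.locks["volume/path"].ref) // 1
    	m.unlock("volume/path")
    	fmt.Println("entries after unlock:", len(m.locks)) // 0
    }
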
  2. cmd/erasure-sets.go

    		}
    	}
    }
    
    func (s *erasureSets) GetLockers(setIndex int) func() ([]dsync.NetLocker, string) {
    	return func() ([]dsync.NetLocker, string) {
    		lockers := make([]dsync.NetLocker, len(s.erasureLockers[setIndex]))
    		copy(lockers, s.erasureLockers[setIndex])
    		return lockers, s.erasureLockOwner
    	}
    }
    
    func (s *erasureSets) GetEndpointStrings(setIndex int) func() []string {
    	return func() []string {
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Fri Aug 29 02:39:48 UTC 2025
    - 37K bytes
    - Viewed (1)
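
    Note that GetLockers hands back a copy of the internal slice rather than the slice itself, so callers can reorder or truncate their snapshot without racing against the set. A hedged illustration of that defensive-copy pattern with simplified types (locker here stands in for dsync.NetLocker):

    package main

    import "fmt"

    // locker stands in for dsync.NetLocker; the real interface is richer.
    type locker interface{ Name() string }

    type named string

    func (n named) Name() string { return string(n) }

    type set struct{ lockers []locker }

    // getLockers mirrors the defensive-copy pattern above: callers get a
    // snapshot they may mutate without corrupting the set's internal slice.
    func (s *set) getLockers() []locker {
    	out := make([]locker, len(s.lockers))
    	copy(out, s.lockers)
    	return out
    }

    func main() {
    	s := &set{lockers: []locker{named("a"), named("b")}}
    	snap := s.getLockers()
    	snap[0] = named("mutated")                       // local to the snapshot
    	fmt.Println(s.lockers[0].Name(), snap[0].Name()) // a mutated
    }
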
  3. cmd/erasure.go

    	// getLockers returns the list of remote and local lockers.
    	getLockers func() ([]dsync.NetLocker, string)
    
    	// getEndpoints returns the list of endpoints belonging to this set;
    	// some may be local and some remote.
    	getEndpoints func() []Endpoint
    
    	// getEndpointStrings returns the list of endpoint strings belonging to this set;
    	// some may be local and some remote.
    	getEndpointStrings func() []string
    
    	// Locker mutex map.
    	nsMutex *nsLockMap
    }
    
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Fri Aug 29 02:39:48 UTC 2025
    - 16.1K bytes
    - Viewed (0)
  4. cmd/admin-handlers.go

    	vars := mux.Vars(r)
    
    	var args dsync.LockArgs
    	var lockers []dsync.NetLocker
    	for path := range strings.SplitSeq(vars["paths"], ",") {
    		if path == "" {
    			continue
    		}
    		args.Resources = append(args.Resources, path)
    	}
    
    	for _, lks := range z.serverPools[0].erasureLockers {
    		lockers = append(lockers, lks...)
    	}
    
    	for _, locker := range lockers {
    		locker.ForceUnlock(ctx, args)
    	}
    }
    
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Fri Aug 29 02:39:48 UTC 2025
    - 99.6K bytes
    - Viewed (0)
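
    The handler above iterates strings.SplitSeq, the iterator-returning variant of strings.Split added in Go 1.24 that yields substrings lazily instead of allocating a slice. A small self-contained example of the same empty-segment guard:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// strings.SplitSeq (Go 1.24+) yields substrings one at a time
    	// instead of building the whole slice strings.Split would return.
    	var resources []string
    	for path := range strings.SplitSeq("bucket/a,,bucket/b", ",") {
    		if path == "" {
    			continue // same empty-segment guard as the handler above
    		}
    		resources = append(resources, path)
    	}
    	fmt.Println(resources) // [bucket/a bucket/b]
    }
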
  5. cmd/local-locker.go

    func (l *localLocker) Close() error {
    	return nil
    }
    
    // IsOnline - local locker is always online.
    func (l *localLocker) IsOnline() bool {
    	return true
    }
    
    // IsLocal - local locker returns true.
    func (l *localLocker) IsLocal() bool {
    	return true
    }
    
    func (l *localLocker) ForceUnlock(ctx context.Context, args dsync.LockArgs) (reply bool, err error) {
    	if ctx.Err() != nil {
    		return false, ctx.Err()
    	}
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Fri Aug 29 02:39:48 UTC 2025
    - 12K bytes
    - Viewed (0)
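
    IsOnline and IsLocal are trivially true for the in-process locker; remote dsync.NetLocker implementations answer differently. A cut-down sketch of that interface shape, reduced to the methods the excerpt shows and with a simplified ForceUnlock signature:

    package main

    import (
    	"context"
    	"errors"
    	"fmt"
    )

    // netLocker is a stand-in for dsync.NetLocker; the real interface
    // takes dsync.LockArgs and carries more methods.
    type netLocker interface {
    	IsOnline() bool
    	IsLocal() bool
    	ForceUnlock(ctx context.Context, resource string) (bool, error)
    }

    type localLocker struct{ held map[string]bool }

    func (l *localLocker) IsOnline() bool { return true } // in-process, never offline
    func (l *localLocker) IsLocal() bool  { return true }

    func (l *localLocker) ForceUnlock(ctx context.Context, resource string) (bool, error) {
    	// Same early-out as the excerpt: do no work on a dead context.
    	if err := ctx.Err(); err != nil {
    		return false, err
    	}
    	if !l.held[resource] {
    		return false, errors.New("not locked")
    	}
    	delete(l.held, resource)
    	return true, nil
    }

    func main() {
    	var l netLocker = &localLocker{held: map[string]bool{"bucket/object": true}}
    	ok, err := l.ForceUnlock(context.Background(), "bucket/object")
    	fmt.Println(l.IsLocal(), ok, err) // true true <nil>
    }
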
  6. docs/en/docs/deployment/docker.md

    And if you need to have multiple workers, you can simply use the `--workers` command line option.
    
    /// note | Technical Details
    
    Registered: Sun Sep 07 07:19:17 UTC 2025
    - Last Modified: Sun Aug 31 09:15:41 UTC 2025
    - 29.5K bytes
    - Viewed (1)
  7. cmd/lock-rest-server-common_test.go

    		TimeLastRefresh: UTCNow().UnixNano(),
    	}
    
    	locker.ll.lockMap["name"] = []lockRequesterInfo{
    		lockRequesterInfo1,
    		lockRequesterInfo2,
    	}
    
    	lri := locker.ll.lockMap["name"]
    
    	// test unknown uid
    	if locker.ll.removeEntry("name", dsync.LockArgs{
    		Owner: "owner",
    		UID:   "unknown-uid",
    	}, &lri) {
    		t.Errorf("Expected %#v, got %#v", false, true)
    	}
    
    	if !locker.ll.removeEntry("name", dsync.LockArgs{
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Wed Apr 09 14:28:39 UTC 2025
    - 3.2K bytes
    - Viewed (0)
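
    The test drives removeEntry: drop the requester whose UID matches from the resource's slice, and delete the map entry once the slice empties. A simplified sketch of that operation (fields reduced; the real method takes dsync.LockArgs and a pointer to the slice):

    package main

    import "fmt"

    type requester struct{ UID string }

    // removeEntry removes the requester with the given UID from the named
    // resource's slice, deleting the map entry when the slice empties.
    // Returns false when no requester matches (the unknown-uid case above).
    func removeEntry(lockMap map[string][]requester, name, uid string) bool {
    	lri := lockMap[name]
    	for i, r := range lri {
    		if r.UID != uid {
    			continue
    		}
    		lri = append(lri[:i], lri[i+1:]...)
    		if len(lri) == 0 {
    			delete(lockMap, name)
    		} else {
    			lockMap[name] = lri
    		}
    		return true
    	}
    	return false // unknown UID: nothing removed
    }

    func main() {
    	m := map[string][]requester{"name": {{UID: "0123"}, {UID: "89ab"}}}
    	fmt.Println(removeEntry(m, "name", "unknown-uid")) // false
    	fmt.Println(removeEntry(m, "name", "0123"))        // true
    	fmt.Println(len(m["name"]))                        // 1
    }
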
  8. docs/orchestration/docker-compose/docker-compose.yaml

      # environment:
        # MINIO_ROOT_USER: minioadmin
        # MINIO_ROOT_PASSWORD: minioadmin
      healthcheck:
        test: ["CMD", "mc", "ready", "local"]
        interval: 5s
        timeout: 5s
        retries: 5
    
    # Starts 4 Docker containers running MinIO server instances.
    # Using an nginx reverse proxy for load balancing, you can access
    # them through port 9000.
    services:
      minio1:
        <<: *minio-common
        hostname: minio1
        volumes:
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Sun Sep 07 05:14:10 UTC 2025
    - 1.5K bytes
    - Viewed (0)
  9. docs/en/docs/deployment/concepts.md

    /// tip
    
    Don't worry if some of these items about **containers**, Docker, or Kubernetes don't make a lot of sense yet.
    
    I'll tell you more about container images, Docker, Kubernetes, etc. in a future chapter: [FastAPI in Containers - Docker](docker.md){.internal-link target=_blank}.
    
    ///
    
    ## Previous Steps Before Starting { #previous-steps-before-starting }
    
    Registered: Sun Sep 07 07:19:17 UTC 2025
    - Last Modified: Sun Aug 31 09:15:41 UTC 2025
    - 18.6K bytes
    - Viewed (0)
  10. cmd/callhome.go

    	// Make sure only 1 callhome is running on the cluster.
    	locker := objAPI.NewNSLock(minioMetaBucket, "callhome/runCallhome.lock")
    	lkctx, err := locker.GetLock(ctx, callhomeLeaderLockTimeout)
    	if err != nil {
    		// lock timed out, which means some other node is the leader;
    		// cycle back and return 'true'
    		return true
    	}
    
    	ctx = lkctx.Context()
    	defer locker.Unlock(lkctx)
    
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Fri Aug 29 02:39:48 UTC 2025
    - 5.3K bytes
    - Viewed (0)
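
    The pattern here is cheap leader election: whichever node wins the cluster-wide lock runs callhome, and a GetLock timeout just means another node already leads, a normal outcome rather than an error. A local sketch of the same shape, with sync.Mutex.TryLock (Go 1.18+) standing in for the distributed lock timing out:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // runIfLeader runs work only when this caller wins the lock; losers
    // back off quietly, mirroring the "lock timed out" branch above.
    func runIfLeader(mu *sync.Mutex, work func()) bool {
    	if !mu.TryLock() {
    		return false // another "node" already holds the leader lock
    	}
    	defer mu.Unlock()
    	work()
    	return true
    }

    func main() {
    	var mu sync.Mutex
    	ran := runIfLeader(&mu, func() { fmt.Println("leader: running callhome") })
    	fmt.Println("ran:", ran) // ran: true
    }
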