Results 1 - 5 of 5 for getBlocks (0.11 sec)

  1. cmd/erasure-object.go

    	nsUnlocker := func() {}
    	defer func() {
    		if unlockOnDefer {
    			nsUnlocker()
    		}
    	}()
    
    	// Acquire lock
    	if !opts.NoLock {
    		lock := er.NewNSLock(bucket, object)
    		lkctx, err := lock.GetRLock(ctx, globalOperationTimeout)
    		if err != nil {
    			return nil, err
    		}
    		ctx = lkctx.Context()
    
    		// Release lock when the metadata is verified, and reader
    		// is ready to be read.
    		//
    - Last Modified: Thu Oct 31 22:10:24 UTC 2024
    - 78.8K bytes
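    The excerpt above shows MinIO's GetObject path taking a namespace read
    lock and arming a conditional deferred unlock, so release of the lock
    can be handed off to the reader once it is ready. A minimal sketch of
    that shape, using a plain sync.RWMutex in place of MinIO's internal
    NSLock; the Options type and getObject function here are hypothetical:

        package main

        import (
        	"fmt"
        	"sync"
        )

        // Options mirrors the opts.NoLock switch in the excerpt.
        type Options struct{ NoLock bool }

        var objectLock sync.RWMutex

        func getObject(opts Options) error {
        	unlockOnDefer := false
        	nsUnlocker := func() {}
        	// Only release on return while this function still owns the lock;
        	// a real reader would take over nsUnlocker and call it on Close.
        	defer func() {
        		if unlockOnDefer {
        			nsUnlocker()
        		}
        	}()

        	if !opts.NoLock {
        		objectLock.RLock()
        		nsUnlocker = objectLock.RUnlock
        		unlockOnDefer = true
        	}

        	fmt.Println("metadata verified under read lock")
        	return nil
        }

        func main() {
        	if err := getObject(Options{}); err != nil {
        		fmt.Println(err)
        	}
        }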
  2. cmd/background-newdisks-heal-ops.go

    			return err
    		}
    	}
    
    	// Prevent parallel erasure set healing
    	locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-drive-healing/%d/%d", poolIdx, setIdx))
    	lkctx, err := locker.GetLock(ctx, newDiskHealingTimeout)
    	if err != nil {
    		return fmt.Errorf("Healing of drive '%v' on %s pool, belonging to %s erasure set already in progress: %w",
    			disk, humanize.Ordinal(poolIdx+1), humanize.Ordinal(setIdx+1), err)
    - Last Modified: Sat Oct 26 09:58:27 UTC 2024
    - 16.6K bytes
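    This excerpt guards drive healing with an exclusive, timed lock so only
    one healer can run per erasure set, and gives up with a wrapped error if
    the lock is already held. A sketch of that timed try-lock behavior built
    on a buffered-channel semaphore rather than MinIO's GetLock; healLock,
    healDrive, and the 5-second timeout are illustrative:

        package main

        import (
        	"context"
        	"fmt"
        	"time"
        )

        // healLock acts as a binary semaphore for one erasure set.
        var healLock = make(chan struct{}, 1)

        func healDrive(ctx context.Context, poolIdx, setIdx int) error {
        	// Try to take the lock, giving up when the timeout elapses.
        	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
        	defer cancel()

        	select {
        	case healLock <- struct{}{}:
        		defer func() { <-healLock }()
        	case <-ctx.Done():
        		return fmt.Errorf("healing of pool %d, set %d already in progress: %w",
        			poolIdx+1, setIdx+1, ctx.Err())
        	}

        	fmt.Println("healing drives...")
        	return nil
        }

        func main() {
        	if err := healDrive(context.Background(), 0, 0); err != nil {
        		fmt.Println(err)
        	}
        }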
  3. cmd/bucket-replication.go

    	}
    
    	// Lock the object name before starting replication operation.
    	// Use separate lock that doesn't collide with regular objects.
    	lk := objectAPI.NewNSLock(bucket, "/[replicate]/"+dobj.ObjectName)
    	lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
    	if err != nil {
    		globalReplicationPool.Get().queueMRFSave(dobj.ToMRFEntry())
    		sendEvent(eventArgs{
    			BucketName: bucket,
    			Object: ObjectInfo{
    				Bucket:       bucket,
    - Last Modified: Thu Oct 10 06:49:55 UTC 2024
    - 116.1K bytes
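    Here replication locks a synthetic name, "/[replicate]/"+dobj.ObjectName,
    so its lock never contends with the regular lock on the object itself. A
    sketch of prefix-namespaced per-name locks, assuming a simple map-backed
    lockSet that is not how MinIO implements NSLock:

        package main

        import (
        	"fmt"
        	"sync"
        )

        // lockSet hands out one mutex per distinct name.
        type lockSet struct {
        	mu    sync.Mutex
        	locks map[string]*sync.Mutex
        }

        func (s *lockSet) get(name string) *sync.Mutex {
        	s.mu.Lock()
        	defer s.mu.Unlock()
        	if s.locks == nil {
        		s.locks = make(map[string]*sync.Mutex)
        	}
        	if _, ok := s.locks[name]; !ok {
        		s.locks[name] = &sync.Mutex{}
        	}
        	return s.locks[name]
        }

        func main() {
        	var ls lockSet
        	object := "photos/cat.png"

        	// The prefixed key maps to a different mutex than the plain key,
        	// so replication and regular object operations never collide.
        	objLock := ls.get(object)
        	replLock := ls.get("/[replicate]/" + object)

        	objLock.Lock()
        	replLock.Lock() // does not block: different key
        	fmt.Println("both locks held independently")
        	replLock.Unlock()
        	objLock.Unlock()
        }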
  4. cmd/data-scanner.go

    // The function will block until the context is canceled.
    // There should only ever be one scanner running per cluster.
    func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
    	ctx, cancel := globalLeaderLock.GetLock(ctx)
    	defer cancel()
    
    	// Load current bloom cycle
    	var cycleInfo currentScannerCycle
    
    	buf, _ := readConfig(ctx, objAPI, dataUsageBloomNamePath)
    	if len(buf) == 8 {
    - Last Modified: Tue Oct 22 21:10:34 UTC 2024
    - 48.4K bytes
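    runDataScanner blocks on a cluster-wide leader lock, which is what
    guarantees a single scanner per cluster; GetLock parks the caller until
    it holds the lock and returns a cancel that releases it. A process-local
    sketch of that blocking acquire/cancel shape; the leaderLock type below
    is an assumption, not MinIO's distributed lock:

        package main

        import (
        	"context"
        	"fmt"
        	"sync"
        )

        type leaderLock struct{ mu sync.Mutex }

        // GetLock blocks until the lock is held, then returns the context
        // and a cancel func that also releases the lock.
        func (l *leaderLock) GetLock(ctx context.Context) (context.Context, context.CancelFunc) {
        	l.mu.Lock()
        	ctx, cancel := context.WithCancel(ctx)
        	return ctx, func() {
        		cancel()
        		l.mu.Unlock()
        	}
        }

        var globalLeaderLock leaderLock

        func runDataScanner(ctx context.Context) {
        	ctx, cancel := globalLeaderLock.GetLock(ctx)
        	defer cancel()
        	_ = ctx // the real scanner loops here until ctx is canceled
        	fmt.Println("scanner running as sole leader")
        }

        func main() {
        	runDataScanner(context.Background())
        }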
  5. cmd/object-handlers.go

    		return
    	}
    
    	// Take read lock on object, here so subsequent lower-level
    	// calls do not need to.
    	lock := objectAPI.NewNSLock(bucket, object)
    	lkctx, err := lock.GetRLock(ctx, globalOperationTimeout)
    	if err != nil {
    		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
    		return
    	}
    	ctx = lkctx.Context()
    	defer lock.RUnlock(lkctx)
    
    - Last Modified: Sat Oct 05 05:16:15 UTC 2024
    - 117.4K bytes
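    The handler takes the object's read lock once, up front, so lower-level
    calls don't have to, and defers the release until the response is
    written. A sketch of the same pattern in a bare net/http handler with a
    sync.RWMutex; getObjectHandler and objectLock are hypothetical names:

        package main

        import (
        	"fmt"
        	"net/http"
        	"sync"
        )

        var objectLock sync.RWMutex

        func getObjectHandler(w http.ResponseWriter, r *http.Request) {
        	// Take the read lock here so subsequent lower-level calls
        	// do not need to.
        	objectLock.RLock()
        	defer objectLock.RUnlock()

        	fmt.Fprintln(w, "object served under read lock")
        }

        func main() {
        	http.HandleFunc("/object", getObjectHandler)
        	if err := http.ListenAndServe(":8080", nil); err != nil {
        		fmt.Println(err)
        	}
        }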