Results 1 - 10 of 56 for `workerCh` (0.06 sec)

  1. cmd/bucket-replication.go

    	if (checkOld > 0 && len(p.workers) != checkOld) || n == len(p.workers) || n < 1 {
    		// Either already satisfied or worker count changed while we waited for the lock.
    		return
    	}
    	for len(p.workers) < n {
    		input := make(chan ReplicationWorkerOperation, 10000)
    		p.workers = append(p.workers, input)
    
    		go p.AddWorker(input, &p.activeWorkers)
    	}
    	for len(p.workers) > n {
    		worker := p.workers[len(p.workers)-1]
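
    This excerpt resizes the replication worker pool in place: it appends fresh input channels (each backed by a new goroutine) until the pool reaches n workers, then peels workers off the tail to shrink it. As a rough illustration of the same grow/shrink pattern, here is a minimal, self-contained sketch; the ResizablePool type, its op payload, and the close-to-stop shutdown are illustrative assumptions, not MinIO's actual types:

    // Sketch of a resizable, channel-backed worker pool (illustrative only).
    package pool

    import (
        "sync"
        "sync/atomic"
    )

    type op func()

    type ResizablePool struct {
        mu      sync.Mutex
        workers []chan op    // one input channel per worker
        active  atomic.Int32 // number of live workers
    }

    // Resize grows or shrinks the pool to n workers.
    func (p *ResizablePool) Resize(n int) {
        p.mu.Lock()
        defer p.mu.Unlock()
        for len(p.workers) < n {
            in := make(chan op, 100)
            p.workers = append(p.workers, in)
            go p.run(in)
        }
        for len(p.workers) > n {
            last := p.workers[len(p.workers)-1]
            p.workers = p.workers[:len(p.workers)-1]
            close(last) // closing the channel tells that worker to exit
        }
    }

    func (p *ResizablePool) run(in chan op) {
        p.active.Add(1)
        defer p.active.Add(-1)
        for o := range in { // returns when the channel is closed
            o()
        }
    }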
  2. cmd/bucket-replication-utils.go

    type replicationResyncer struct {
    	// map of bucket to their resync status
    	statusMap      map[string]BucketReplicationResyncStatus
    	workerSize     int
    	resyncCancelCh chan struct{}
    	workerCh       chan struct{}
    	sync.RWMutex
    }
    
    const (
    	replicationDir      = ".replication"
    	resyncFileName      = "resync.bin"
    	resyncMetaFormat    = 1
    	resyncMetaVersionV1 = 1
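
    In this struct, workerCh is a bare chan struct{}, the idiomatic Go shape for a counting semaphore: fill it with workerSize tokens and have each resync goroutine take one before running and put it back when done. A minimal sketch of that token-pool pattern (the limiter type and its methods are assumptions for illustration, not MinIO's API):

    // Channel-as-semaphore: bounds concurrency at n goroutines (illustrative).
    package pool

    type limiter chan struct{}

    func newLimiter(n int) limiter {
        l := make(limiter, n)
        for i := 0; i < n; i++ {
            l <- struct{}{} // pre-fill with n tokens
        }
        return l
    }

    func (l limiter) acquire() { <-l }             // blocks while all tokens are out
    func (l limiter) release() { l <- struct{}{} } // returns a token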
  3. docs/en/docs/deployment/server-workers.md

    In particular, when running on **Kubernetes** you will probably **not** want to use workers and instead run **a single Uvicorn process per container**, but I'll tell you about it later in that chapter.
    
    ///
    
    ## Multiple Workers { #multiple-workers }
    
    You can start multiple workers with the `--workers` command line option:
    
    //// tab | `fastapi`
    
    If you use the `fastapi` command:
    
    <div class="termy">
    
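    The excerpt cuts off right where the docs render the terminal example. For orientation, a typical invocation that passes a worker count to the `fastapi` CLI looks like this (verify the flag against `fastapi run --help` for your installed version):

        $ fastapi run main.py --workers 4

    Each worker is a separate process serving the same application, with one process manager in front listening on the shared port.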
  4. docs/en/docs/deployment/concepts.md

    Here are some possible combinations and strategies:
    
    * **Uvicorn** with `--workers`
        * One Uvicorn **process manager** would listen on the **IP** and **port**, and it would start **multiple Uvicorn worker processes**.
    * **Kubernetes** and other distributed **container systems**
  5. docs/en/docs/deployment/docker.md

    And if you need to have multiple workers, you can simply use the `--workers` command line option.
    
    /// note | Technical Details
    
    The Docker image was created when Uvicorn didn't support managing and restarting dead workers, so it was necessary to use Gunicorn together with Uvicorn, which added considerable complexity, just to have Gunicorn manage and restart the Uvicorn worker processes.
    
  6. cmd/bucket-lifecycle.go

    		workers = workers[:len(workers)-1]
    		worker <- expiryOp(nil)
    		es.stats.workers.Add(-1)
    	}
    	// Atomically replace workers.
    	es.workers.Store(&workers)
    }
    
    // Worker handles 4 types of expiration tasks.
    // 1. Expiry of objects, includes regular and transitioned objects
    // 2. Expiry of noncurrent versions due to NewerNoncurrentVersions
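
    Two details are worth noting in this excerpt: a worker is retired by sending it expiryOp(nil) as a stop sentinel, and the shrunken slice is then published with an atomic Store so concurrent readers never observe a half-updated list. A compressed sketch of the sentinel half of the pattern (the types here are stand-ins, not MinIO's):

    // Sentinel-based worker shutdown (illustrative stand-in types).
    package pool

    type expiryOp interface{ apply() }

    func worker(in chan expiryOp) {
        for op := range in {
            if op == nil {
                return // a nil op is the "stop" sentinel
            }
            op.apply()
        }
    }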
  7. cmd/data-scanner_test.go

    		t.Run(fmt.Sprintf("TestApplyNewerNoncurrentVersionsLimit-%d", i), func(t *testing.T) {
    			workers := []chan expiryOp{make(chan expiryOp)}
    			es.workers.Store(&workers)
    			workerReady := make(chan struct{})
    			var wg sync.WaitGroup
    			wg.Add(1)
    			var gotExpired []ObjectToDelete
    			go expiryWorker(&wg, workerReady, workers[0], &gotExpired)
    			<-workerReady
    
    			item := scannerItem{
    				Path:        obj,
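
    The test synchronizes with its worker goroutine through a readiness channel: the worker signals on workerReady once it is up, and the test blocks on <-workerReady before producing any work, which removes the startup race. A stripped-down sketch of that handshake (the names and task type are hypothetical):

    // Readiness-handshake pattern for tests (illustrative).
    package pool

    import "sync"

    type task int

    // startWorker signals readiness, then records every task it receives.
    func startWorker(wg *sync.WaitGroup, ready chan<- struct{}, in <-chan task, got *[]task) {
        defer wg.Done()
        close(ready) // the test blocks on <-ready until this point
        for t := range in {
            *got = append(*got, t)
        }
    }

    The caller does wg.Add(1) before starting the goroutine, waits on <-ready, sends its tasks, closes in, and calls wg.Wait() before inspecting *got.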
  8. cmd/batch-handlers.go

    // newBatchJobPool creates a pool of job manifest workers of specified size
    func newBatchJobPool(ctx context.Context, o ObjectLayer, workers int) *BatchJobPool {
    	jpool := &BatchJobPool{
    		ctx:          ctx,
    		objLayer:     o,
    		jobCh:        make(chan *BatchJobRequest, 10000),
    		workerKillCh: make(chan struct{}, workers),
    		jobCancelers: make(map[string]context.CancelFunc),
    	}
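
    Note that workerKillCh is buffered to the worker count, so the pool can queue a kill signal for every worker without blocking; each worker treats a receive on the kill channel as an instruction to exit. A minimal sketch of that select loop (illustrative names, not MinIO's):

    // Kill-channel worker loop (illustrative).
    package pool

    type job func()

    type jobPool struct {
        jobCh  chan job
        killCh chan struct{} // buffered to the worker count
    }

    func (p *jobPool) worker() {
        for {
            select {
            case j := <-p.jobCh:
                j()
            case <-p.killCh:
                return // one token on killCh retires one worker
            }
        }
    }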
  9. cmd/erasure-server-pool-rebalance.go

    	if err != nil {
    		rebalanceLogIf(ctx, fmt.Errorf("invalid workers value err: %v, defaulting to %d", err, len(pool.sets)))
    		workerSize = len(pool.sets)
    	}
    
    	// Each decom worker needs one List() goroutine/worker
    	// add that many extra workers.
    	workerSize += len(pool.sets)
    
    	wk, err := workers.New(workerSize)
    	if err != nil {
    		return err
    	}
    
  10. cmd/batch-rotate.go

    	}
    
    	workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_KEYROTATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
    	if err != nil {
    		return err
    	}
    
    	wk, err := workers.New(workerSize)
    	if err != nil {
    		// invalid worker size.
    		return err
    	}
    
    	ctx, cancel := context.WithCancel(ctx)
    
    	results := make(chan itemOrErr[ObjectInfo], 100)
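
    The worker count here comes from an environment variable, defaulting to half of GOMAXPROCS when the variable is unset; a set-but-unparsable value aborts the job. A standalone sketch of that sizing logic (the function name and its parameter are placeholders for illustration):

    // Env-driven worker sizing (illustrative).
    package pool

    import (
        "os"
        "runtime"
        "strconv"
    )

    // workerCount parses envVar, falling back to GOMAXPROCS/2 (minimum 1)
    // when it is unset; an invalid value surfaces as an error.
    func workerCount(envVar string) (int, error) {
        def := runtime.GOMAXPROCS(0) / 2
        if def < 1 {
            def = 1
        }
        v := os.Getenv(envVar)
        if v == "" {
            return def, nil
        }
        return strconv.Atoi(v)
    }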