Results 21-30 of 45 for UntilWithContext
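
These hits all call wait.UntilWithContext from k8s.io/apimachinery/pkg/util/wait, which invokes f(ctx) immediately and then once per period, sleeping between invocations, until ctx is cancelled. A minimal, self-contained sketch of the call (the ticker body is illustrative, not taken from any file below):

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
    	// Cancel after three seconds, standing in for controller shutdown.
    	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    	defer cancel()

    	// Runs the closure now, then once per second, until ctx is done.
    	wait.UntilWithContext(ctx, func(ctx context.Context) {
    		fmt.Println("tick")
    	}, time.Second)
    }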

  1. pkg/controller/nodelifecycle/node_lifecycle_controller.go

    		// be re-queued until "Done", so no more than one worker handles the same item and
    		// no event is missed.
    		go wait.UntilWithContext(ctx, nc.doNodeProcessingPassWorker, time.Second)
    	}
    
    	for i := 0; i < podUpdateWorkerSize; i++ {
    		go wait.UntilWithContext(ctx, nc.doPodProcessingWorker, time.Second)
    	}
    
    - Last Modified: Sat May 04 18:33:12 UTC 2024
    - 51.6K bytes
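
    The loop above fans out a fixed pool of workers, each kept alive by wait.UntilWithContext, over one shared workqueue. The excerpt's comment refers to the workqueue contract: a key handed out by Get is not given to another worker until Done is called, and a key re-added in the meantime is re-queued only afterwards. A minimal sketch of that contract, using hypothetical keys rather than the node controller's real items:

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"k8s.io/apimachinery/pkg/util/wait"
    	"k8s.io/client-go/util/workqueue"
    )

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    	defer cancel()

    	queue := workqueue.New()
    	defer queue.ShutDown()
    	queue.Add("node-a") // hypothetical keys, for illustration only
    	queue.Add("node-b")

    	worker := func(ctx context.Context) {
    		for {
    			key, quit := queue.Get()
    			if quit {
    				return
    			}
    			fmt.Println("processing", key)
    			// Done releases the key; only then can a re-added key be
    			// handed out again, so no two workers ever hold the same key.
    			queue.Done(key)
    		}
    	}

    	// Fan out a fixed pool of workers, as the excerpt does.
    	for i := 0; i < 4; i++ {
    		go wait.UntilWithContext(ctx, worker, time.Second)
    	}
    	<-ctx.Done()
    }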
  2. pkg/controller/volume/persistentvolume/pv_controller_base.go

    		return
    	}
    
    	ctrl.initializeCaches(logger, ctrl.volumeLister, ctrl.claimLister)
    
    	go wait.Until(func() { ctrl.resync(ctx) }, ctrl.resyncPeriod, ctx.Done())
    	go wait.UntilWithContext(ctx, ctrl.volumeWorker, time.Second)
    	go wait.UntilWithContext(ctx, ctrl.claimWorker, time.Second)
    
    	metrics.Register(ctrl.volumes.store, ctrl.claims, &ctrl.volumePluginMgr)
    
    	<-ctx.Done()
    }
    
    - Last Modified: Fri May 10 08:42:31 UTC 2024
    - 29.5K bytes
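
    Note how this file mixes the two forms: resync takes a context, so it is wrapped in a closure for the stop-channel based wait.Until, while volumeWorker and claimWorker are passed straight to wait.UntilWithContext. The two goroutines below run the same hypothetical task either way (a sketch of the equivalence, not the controller's code):

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    	defer cancel()

    	task := func(ctx context.Context) { fmt.Println("resync") } // hypothetical task

    	// Stop-channel form: adapt the context-taking task with a closure.
    	go wait.Until(func() { task(ctx) }, 500*time.Millisecond, ctx.Done())
    	// Context form: pass the task directly.
    	go wait.UntilWithContext(ctx, task, 500*time.Millisecond)

    	<-ctx.Done()
    }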
  3. pkg/controller/ttlafterfinished/ttlafterfinished_controller.go

    	if !cache.WaitForNamedCacheSync("TTL after finished", ctx.Done(), tc.jListerSynced) {
    		return
    	}
    
    	for i := 0; i < workers; i++ {
    		go wait.UntilWithContext(ctx, tc.worker, time.Second)
    	}
    
    	<-ctx.Done()
    }
    
    func (tc *Controller) addJob(logger klog.Logger, obj interface{}) {
    	job := obj.(*batch.Job)
    	logger.V(4).Info("Adding job", "job", klog.KObj(job))
    - Last Modified: Tue May 07 23:59:28 UTC 2024
    - 10.4K bytes
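
    As in most of these controllers, the workers only start after cache.WaitForNamedCacheSync returns true, so they never act on a half-filled informer cache. A sketch of that gate with a hypothetical always-true InformerSynced (a real controller passes its listers' HasSynced methods instead):

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"k8s.io/apimachinery/pkg/util/wait"
    	"k8s.io/client-go/tools/cache"
    )

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    	defer cancel()

    	// Hypothetical InformerSynced, standing in for tc.jListerSynced.
    	synced := func() bool { return true }

    	// Block until every cache reports synced (or ctx is cancelled).
    	if !cache.WaitForNamedCacheSync("example", ctx.Done(), synced) {
    		return
    	}

    	go wait.UntilWithContext(ctx, func(ctx context.Context) {
    		fmt.Println("worker pass")
    	}, time.Second)
    	<-ctx.Done()
    }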
  4. pkg/controller/resourcequota/resource_quota_monitor.go

    	// shut down, so we need to shut it down in a separate goroutine.
    	go func() {
    		defer utilruntime.HandleCrash()
    		defer qm.resourceChanges.ShutDown()
    
    		<-ctx.Done()
    	}()
    	wait.UntilWithContext(ctx, qm.runProcessResourceChanges, 1*time.Second)
    
    	// Stop any running monitors.
    	qm.monitorLock.Lock()
    	defer qm.monitorLock.Unlock()
    	monitors := qm.monitors
    	stopped := 0
    - Last Modified: Sat May 04 18:33:12 UTC 2024
    - 11.6K bytes
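
    Unlike the other hits, this one calls wait.UntilWithContext synchronously, so Run itself blocks until ctx is cancelled. And because a processor blocked inside queue.Get would not notice ctx alone, a separate goroutine shuts the queue down once ctx is done, which unblocks Get. A sketch of that shutdown choreography, with a hypothetical queue item:

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    	"k8s.io/apimachinery/pkg/util/wait"
    	"k8s.io/client-go/util/workqueue"
    )

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    	defer cancel()

    	queue := workqueue.New()
    	queue.Add("change") // hypothetical item

    	// Shut the queue down once ctx is done, so blocked Gets return.
    	go func() {
    		defer utilruntime.HandleCrash()
    		defer queue.ShutDown()
    		<-ctx.Done()
    	}()

    	// Synchronous call: main blocks here until ctx is cancelled.
    	wait.UntilWithContext(ctx, func(ctx context.Context) {
    		for {
    			item, quit := queue.Get()
    			if quit {
    				return
    			}
    			fmt.Println("processed", item)
    			queue.Done(item)
    		}
    	}, time.Second)
    }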
  5. pkg/controller/podgc/gc_controller.go

    	defer gcc.nodeQueue.ShutDown()
    	defer logger.Info("Shutting down GC controller")
    
    	if !cache.WaitForNamedCacheSync("GC", ctx.Done(), gcc.podListerSynced, gcc.nodeListerSynced) {
    		return
    	}
    
    	go wait.UntilWithContext(ctx, gcc.gc, gcc.gcCheckPeriod)
    
    	<-ctx.Done()
    }
    
    func (gcc *PodGCController) gc(ctx context.Context) {
    	pods, err := gcc.podLister.List(labels.Everything())
    	if err != nil {
    - Last Modified: Sat May 04 18:33:12 UTC 2024
    - 13.3K bytes
  6. pkg/controller/volume/expand/expand_controller.go

    	defer logger.Info("Shutting down expand controller")
    
    	if !cache.WaitForNamedCacheSync("expand", ctx.Done(), expc.pvcsSynced) {
    		return
    	}
    
    	for i := 0; i < defaultWorkerCount; i++ {
    		go wait.UntilWithContext(ctx, expc.runWorker, time.Second)
    	}
    
    	<-ctx.Done()
    }
    
    func (expc *expandController) runWorker(ctx context.Context) {
    	for expc.processNextWorkItem(ctx) {
    	}
    }
    
    - Last Modified: Sat May 04 18:33:12 UTC 2024
    - 16.2K bytes
  7. pkg/controller/volume/pvcprotection/pvc_protection_controller.go

    	if !cache.WaitForNamedCacheSync("PVC protection", ctx.Done(), c.pvcListerSynced, c.podListerSynced) {
    		return
    	}
    
    	for i := 0; i < workers; i++ {
    		go wait.UntilWithContext(ctx, c.runWorker, time.Second)
    	}
    
    	<-ctx.Done()
    }
    
    func (c *Controller) runWorker(ctx context.Context) {
    	for c.processNextWorkItem(ctx) {
    	}
    }
    
    - Last Modified: Sat May 04 18:33:12 UTC 2024
    - 14K bytes
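
    Results 6 and 7 share the same worker shape: runWorker is nothing but a loop over processNextWorkItem, which returns false once the queue shuts down, ending the loop; wait.UntilWithContext then only restarts runWorker if ctx is still live. A sketch of that shape, with a hypothetical controller type standing in for expandController and the PVC protection Controller:

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"k8s.io/apimachinery/pkg/util/wait"
    	"k8s.io/client-go/util/workqueue"
    )

    // Hypothetical controller, for illustration only.
    type controller struct {
    	queue workqueue.Interface
    }

    func (c *controller) runWorker(ctx context.Context) {
    	for c.processNextWorkItem(ctx) {
    	}
    }

    // processNextWorkItem returns false only when the queue is shutting
    // down, which ends the runWorker loop.
    func (c *controller) processNextWorkItem(ctx context.Context) bool {
    	key, quit := c.queue.Get()
    	if quit {
    		return false
    	}
    	defer c.queue.Done(key)
    	fmt.Println("handled", key)
    	return true
    }

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    	defer cancel()

    	c := &controller{queue: workqueue.New()}
    	c.queue.Add("pvc-1") // hypothetical key
    	go func() {
    		<-ctx.Done()
    		c.queue.ShutDown()
    	}()

    	go wait.UntilWithContext(ctx, c.runWorker, time.Second)
    	<-ctx.Done()
    }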
  8. pkg/controller/garbagecollector/garbagecollector.go

    	}) {
    		return
    	}
    
    	logger.Info("All resource monitors have synced. Proceeding to collect garbage")
    
    	// gc workers
    	for i := 0; i < workers; i++ {
    		go wait.UntilWithContext(ctx, gc.runAttemptToDeleteWorker, 1*time.Second)
    		go wait.Until(func() { gc.runAttemptToOrphanWorker(logger) }, 1*time.Second, ctx.Done())
    	}
    
    	<-ctx.Done()
    }
    
    - Last Modified: Sat May 04 18:33:12 UTC 2024
    - 32.9K bytes
  9. pkg/controller/disruption/disruption.go

    		return
    	}
    
    	go wait.UntilWithContext(ctx, dc.worker, time.Second)
    	go wait.Until(dc.recheckWorker, time.Second, ctx.Done())
    	go wait.UntilWithContext(ctx, dc.stalePodDisruptionWorker, time.Second)
    
    	<-ctx.Done()
    }
    
    func (dc *DisruptionController) addDB(logger klog.Logger, obj interface{}) {
    - Last Modified: Sat May 04 18:33:12 UTC 2024
    - 36.1K bytes
  10. pkg/scheduler/scheduler.go

    	// If there are no new pods to schedule, it will hang there,
    	// and if it ran in this goroutine it would block the closing of
    	// SchedulingQueue, in effect causing a deadlock on shutdown.
    	go wait.UntilWithContext(ctx, sched.ScheduleOne, 0)
    
    	<-ctx.Done()
    	sched.SchedulingQueue.Close()
    
    	// If the plugins satisfy the io.Closer interface, they are closed.
    	err := sched.Profiles.Close()
    	if err != nil {
    - Last Modified: Tue Jun 04 06:20:55 UTC 2024
    - 20.8K bytes
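
    The scheduler passes a period of 0: wait.UntilWithContext starts the next ScheduleOne as soon as the previous one returns, and the loop paces itself by blocking inside ScheduleOne until a pod is available. Running it in its own goroutine keeps that blocking loop from preventing <-ctx.Done() from reaching SchedulingQueue.Close(). A sketch of the zero-period behaviour, with the blocking fetch simulated by a channel:

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
    	defer cancel()

    	pods := make(chan string, 3)
    	pods <- "pod-a" // hypothetical pods, simulating the scheduling queue
    	pods <- "pod-b"
    	pods <- "pod-c"

    	// Period 0: the next iteration starts as soon as the previous one
    	// returns; the loop paces itself by blocking on the pod source.
    	go wait.UntilWithContext(ctx, func(ctx context.Context) {
    		select {
    		case p := <-pods:
    			fmt.Println("scheduling", p)
    		case <-ctx.Done():
    		}
    	}, 0)

    	<-ctx.Done()
    }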