- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 23 for UntilWithContext (0.3 sec)
-
pkg/controller/resourcequota/resource_quota_controller.go
for i := 0; i < workers; i++ { go wait.UntilWithContext(ctx, rq.worker(rq.queue), time.Second) go wait.UntilWithContext(ctx, rq.worker(rq.missingUsageQueue), time.Second) } // the timer for how often we do a full recalculation across all quotas if rq.resyncPeriod() > 0 { go wait.UntilWithContext(ctx, rq.enqueueAll, rq.resyncPeriod()) } else {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 21.3K bytes - Viewed (0) -
pkg/controller/storageversiongc/gc_controller.go
// handles storageversion creation/update with non-existing id. The latter should rarely // happen. It's okay for the two workers to conflict on update. go wait.UntilWithContext(ctx, c.runLeaseWorker, time.Second) go wait.UntilWithContext(ctx, c.runStorageVersionWorker, time.Second) <-ctx.Done() } func (c *Controller) runLeaseWorker(ctx context.Context) { for c.processNextLease(ctx) { } }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 10K bytes - Viewed (0) -
pkg/controller/serviceaccount/tokens_controller.go
return } logger := klog.FromContext(ctx) logger.V(5).Info("Starting workers") for i := 0; i < workers; i++ { go wait.UntilWithContext(ctx, e.syncServiceAccount, 0) go wait.UntilWithContext(ctx, e.syncSecret, 0) } <-ctx.Done() logger.V(1).Info("Shutting down") } func (e *TokensController) queueServiceAccountSync(obj interface{}) {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 20.5K bytes - Viewed (0) -
pkg/controller/volume/persistentvolume/pv_controller_base.go
return } ctrl.initializeCaches(logger, ctrl.volumeLister, ctrl.claimLister) go wait.Until(func() { ctrl.resync(ctx) }, ctrl.resyncPeriod, ctx.Done()) go wait.UntilWithContext(ctx, ctrl.volumeWorker, time.Second) go wait.UntilWithContext(ctx, ctrl.claimWorker, time.Second) metrics.Register(ctrl.volumes.store, ctrl.claims, &ctrl.volumePluginMgr) <-ctx.Done() }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Fri May 10 08:42:31 UTC 2024 - 29.5K bytes - Viewed (0) -
pkg/controller/ttlafterfinished/ttlafterfinished_controller.go
if !cache.WaitForNamedCacheSync("TTL after finished", ctx.Done(), tc.jListerSynced) { return } for i := 0; i < workers; i++ { go wait.UntilWithContext(ctx, tc.worker, time.Second) } <-ctx.Done() } func (tc *Controller) addJob(logger klog.Logger, obj interface{}) { job := obj.(*batch.Job) logger.V(4).Info("Adding job", "job", klog.KObj(job))
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue May 07 23:59:28 UTC 2024 - 10.4K bytes - Viewed (0) -
pkg/controller/resourcequota/resource_quota_monitor.go
// shut down, so we need to shut it down in a separate goroutine. go func() { defer utilruntime.HandleCrash() defer qm.resourceChanges.ShutDown() <-ctx.Done() }() wait.UntilWithContext(ctx, qm.runProcessResourceChanges, 1*time.Second) // Stop any running monitors. qm.monitorLock.Lock() defer qm.monitorLock.Unlock() monitors := qm.monitors stopped := 0
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 11.6K bytes - Viewed (0) -
pkg/controller/podgc/gc_controller.go
defer gcc.nodeQueue.ShutDown() defer logger.Info("Shutting down GC controller") if !cache.WaitForNamedCacheSync("GC", ctx.Done(), gcc.podListerSynced, gcc.nodeListerSynced) { return } go wait.UntilWithContext(ctx, gcc.gc, gcc.gcCheckPeriod) <-ctx.Done() } func (gcc *PodGCController) gc(ctx context.Context) { pods, err := gcc.podLister.List(labels.Everything()) if err != nil {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 13.3K bytes - Viewed (0) -
pkg/controller/volume/pvcprotection/pvc_protection_controller.go
if !cache.WaitForNamedCacheSync("PVC protection", ctx.Done(), c.pvcListerSynced, c.podListerSynced) { return } for i := 0; i < workers; i++ { go wait.UntilWithContext(ctx, c.runWorker, time.Second) } <-ctx.Done() } func (c *Controller) runWorker(ctx context.Context) { for c.processNextWorkItem(ctx) { } }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 14K bytes - Viewed (0) -
pkg/controller/volume/expand/expand_controller.go
defer logger.Info("Shutting down expand controller") if !cache.WaitForNamedCacheSync("expand", ctx.Done(), expc.pvcsSynced) { return } for i := 0; i < defaultWorkerCount; i++ { go wait.UntilWithContext(ctx, expc.runWorker, time.Second) } <-ctx.Done() } func (expc *expandController) runWorker(ctx context.Context) { for expc.processNextWorkItem(ctx) { } }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 16.2K bytes - Viewed (0) -
pkg/controller/disruption/disruption.go
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 36.1K bytes - Viewed (0)