- Sort Score
- Results per page: 10
- Languages All
Results 21 - 30 of 42 for UntilWithContext (0.34 sec)
-
pkg/controller/resourcequota/resource_quota_monitor.go
// shut down, so we need to shut it down in a separate goroutine. go func() { defer utilruntime.HandleCrash() defer qm.resourceChanges.ShutDown() <-ctx.Done() }() wait.UntilWithContext(ctx, qm.runProcessResourceChanges, 1*time.Second) // Stop any running monitors. qm.monitorLock.Lock() defer qm.monitorLock.Unlock() monitors := qm.monitors stopped := 0
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 11.6K bytes - Viewed (0) -
pkg/controller/podgc/gc_controller.go
defer gcc.nodeQueue.ShutDown() defer logger.Info("Shutting down GC controller") if !cache.WaitForNamedCacheSync("GC", ctx.Done(), gcc.podListerSynced, gcc.nodeListerSynced) { return } go wait.UntilWithContext(ctx, gcc.gc, gcc.gcCheckPeriod) <-ctx.Done() } func (gcc *PodGCController) gc(ctx context.Context) { pods, err := gcc.podLister.List(labels.Everything()) if err != nil {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 13.3K bytes - Viewed (0) -
pkg/controller/volume/pvcprotection/pvc_protection_controller.go
if !cache.WaitForNamedCacheSync("PVC protection", ctx.Done(), c.pvcListerSynced, c.podListerSynced) { return } for i := 0; i < workers; i++ { go wait.UntilWithContext(ctx, c.runWorker, time.Second) } <-ctx.Done() } func (c *Controller) runWorker(ctx context.Context) { for c.processNextWorkItem(ctx) { } }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 14K bytes - Viewed (0) -
pkg/controller/volume/expand/expand_controller.go
defer logger.Info("Shutting down expand controller") if !cache.WaitForNamedCacheSync("expand", ctx.Done(), expc.pvcsSynced) { return } for i := 0; i < defaultWorkerCount; i++ { go wait.UntilWithContext(ctx, expc.runWorker, time.Second) } <-ctx.Done() } func (expc *expandController) runWorker(ctx context.Context) { for expc.processNextWorkItem(ctx) { } }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 16.2K bytes - Viewed (0) -
pkg/controller/disruption/disruption.go
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 36.1K bytes - Viewed (0) -
pkg/controller/garbagecollector/garbagecollector.go
}) { return } logger.Info("All resource monitors have synced. Proceeding to collect garbage") // gc workers for i := 0; i < workers; i++ { go wait.UntilWithContext(ctx, gc.runAttemptToDeleteWorker, 1*time.Second) go wait.Until(func() { gc.runAttemptToOrphanWorker(logger) }, 1*time.Second, ctx.Done()) } <-ctx.Done() }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 32.9K bytes - Viewed (0) -
pkg/scheduler/scheduler.go
// If there are no new pods to schedule, it will be hanging there // and if done in this goroutine it will be blocking closing // SchedulingQueue, in effect causing a deadlock on shutdown. go wait.UntilWithContext(ctx, sched.ScheduleOne, 0) <-ctx.Done() sched.SchedulingQueue.Close() // If the plugins satisfy the io.Closer interface, they are closed. err := sched.Profiles.Close() if err != nil {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Jun 04 06:20:55 UTC 2024 - 20.8K bytes - Viewed (0) -
pkg/controller/servicecidrs/servicecidrs_controller.go
if !cache.WaitForNamedCacheSync(controllerName, ctx.Done(), c.serviceCIDRsSynced, c.ipAddressSynced) { return } for i := 0; i < workers; i++ { go wait.UntilWithContext(ctx, c.worker, c.workerLoopPeriod) } <-ctx.Done() } func (c *Controller) addServiceCIDR(obj interface{}) { cidr, ok := obj.(*networkingapiv1alpha1.ServiceCIDR) if !ok { return }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 18K bytes - Viewed (0) -
pkg/controller/nodeipam/ipam/range_allocator.go
defer logger.Info("Shutting down range CIDR allocator") if !cache.WaitForNamedCacheSync("cidrallocator", ctx.Done(), r.nodesSynced) { return } for i := 0; i < cidrUpdateWorkers; i++ { go wait.UntilWithContext(ctx, r.runWorker, time.Second) } <-ctx.Done() } // runWorker is a long-running function that will continually call the // processNextWorkItem function in order to read and process a message on the
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Apr 24 10:06:15 UTC 2024 - 16.2K bytes - Viewed (0) -
staging/src/k8s.io/apimachinery/pkg/util/wait/wait_test.go
} func TestUntilWithContext(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) cancel() UntilWithContext(ctx, func(context.Context) { t.Fatal("should not have been invoked") }, 0) ctx, cancel = context.WithCancel(context.TODO()) called := make(chan struct{}) go func() { UntilWithContext(ctx, func(context.Context) { called <- struct{}{} }, 0) close(called) }() <-called cancel()
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Mar 26 16:28:45 UTC 2024 - 41.1K bytes - Viewed (0)