- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 32 for WaitForNamedCacheSync (0.57 sec)
-
pkg/proxy/config/config.go
} // Run waits for cache synced and invokes handlers after syncing. func (c *EndpointSliceConfig) Run(stopCh <-chan struct{}) { c.logger.Info("Starting endpoint slice config controller") if !cache.WaitForNamedCacheSync("endpoint slice config", stopCh, c.listerSynced) { return } for _, h := range c.eventHandlers { c.logger.V(3).Info("Calling handler.OnEndpointSlicesSynced()") h.OnEndpointSlicesSynced() }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon Apr 22 05:08:41 UTC 2024 - 15.1K bytes - Viewed (0) -
pkg/controller/volume/attachdetach/attach_detach_controller_test.go
} for _, csiNode := range csiNodes.Items { csiNodeToAdd := csiNode csiNodeInformer.GetIndexer().Add(&csiNodeToAdd) } informerFactory.Start(tCtx.Done()) if !kcache.WaitForNamedCacheSync("attach detach", tCtx.Done(), informerFactory.Core().V1().Pods().Informer().HasSynced, informerFactory.Core().V1().Nodes().Informer().HasSynced, informerFactory.Storage().V1().CSINodes().Informer().HasSynced) {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu Apr 18 11:00:37 UTC 2024 - 24.2K bytes - Viewed (0) -
pkg/controller/resourcequota/resource_quota_controller.go
logger.Info("Starting resource quota controller") defer logger.Info("Shutting down resource quota controller") if rq.quotaMonitor != nil { go rq.quotaMonitor.Run(ctx) } if !cache.WaitForNamedCacheSync("resource quota", ctx.Done(), rq.informerSyncedFuncs...) { return } // the workers that chug through the quota calculation backlog for i := 0; i < workers; i++ {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 21.3K bytes - Viewed (0) -
pkg/controller/serviceaccount/legacy_serviceaccount_token_cleaner.go
logger := klog.FromContext(ctx) logger.Info("Starting legacy service account token cleaner controller") defer logger.Info("Shutting down legacy service account token cleaner controller") if !cache.WaitForNamedCacheSync("legacy-service-account-token-cleaner", ctx.Done(), tc.saInformerSynced, tc.secretInformerSynced, tc.podInformerSynced) { return } go wait.UntilWithContext(ctx, tc.evaluateSATokens, tc.syncInterval)
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Fri Oct 27 03:52:06 UTC 2023 - 10K bytes - Viewed (0) -
staging/src/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go
klog.Infof("Starting %s", c.name) defer klog.Infof("Shutting down %s", c.name) go c.configmapInformer.Run(ctx.Done()) // wait for caches to fill before starting your work if !cache.WaitForNamedCacheSync(c.name, ctx.Done(), c.configmapInformerSynced) { return } // doesn't matter what workers say, only start one. go wait.Until(c.runWorker, time.Second, ctx.Done()) <-ctx.Done() }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 10.9K bytes - Viewed (0) -
pkg/kubemark/controller.go
return controller, nil } // WaitForCacheSync waits until all caches in the controller are populated. func (kubemarkController *KubemarkController) WaitForCacheSync(stopCh chan struct{}) bool { return cache.WaitForNamedCacheSync("kubemark", stopCh, kubemarkController.externalCluster.rcSynced, kubemarkController.externalCluster.podSynced, kubemarkController.kubemarkCluster.nodeSynced) }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Fri Jul 17 23:02:17 UTC 2020 - 14.1K bytes - Viewed (0) -
staging/src/k8s.io/apiserver/pkg/util/peerproxy/peerproxy_handler.go
ctx context.Context } func (h *peerProxyHandler) HasFinishedSync() bool { return h.finishedSync.Load() } func (h *peerProxyHandler) WaitForCacheSync(stopCh <-chan struct{}) error { ok := cache.WaitForNamedCacheSync("unknown-version-proxy", stopCh, h.storageversionInformer.HasSynced, h.storageversionManager.Completed) if !ok { return fmt.Errorf("error while waiting for initial cache sync") }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Jul 19 00:36:22 UTC 2023 - 11.5K bytes - Viewed (0) -
pkg/controller/ttlafterfinished/ttlafterfinished_controller.go
defer tc.queue.ShutDown() logger := klog.FromContext(ctx) logger.Info("Starting TTL after finished controller") defer logger.Info("Shutting down TTL after finished controller") if !cache.WaitForNamedCacheSync("TTL after finished", ctx.Done(), tc.jListerSynced) { return } for i := 0; i < workers; i++ { go wait.UntilWithContext(ctx, tc.worker, time.Second) } <-ctx.Done() }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue May 07 23:59:28 UTC 2024 - 10.4K bytes - Viewed (0) -
pkg/controller/podgc/gc_controller.go
logger := klog.FromContext(ctx) defer utilruntime.HandleCrash() logger.Info("Starting GC controller") defer gcc.nodeQueue.ShutDown() defer logger.Info("Shutting down GC controller") if !cache.WaitForNamedCacheSync("GC", ctx.Done(), gcc.podListerSynced, gcc.nodeListerSynced) { return } go wait.UntilWithContext(ctx, gcc.gc, gcc.gcCheckPeriod) <-ctx.Done() }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 13.3K bytes - Viewed (0) -
pkg/controller/volume/pvcprotection/pvc_protection_controller.go
defer c.queue.ShutDown() logger := klog.FromContext(ctx) logger.Info("Starting PVC protection controller") defer logger.Info("Shutting down PVC protection controller") if !cache.WaitForNamedCacheSync("PVC protection", ctx.Done(), c.pvcListerSynced, c.podListerSynced) { return } for i := 0; i < workers; i++ { go wait.UntilWithContext(ctx, c.runWorker, time.Second) } <-ctx.Done() }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 14K bytes - Viewed (0)