- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 54 for sync_bg (0.14 sec)
-
pkg/controller/deployment/deployment_controller.go
logger.Error(err, "Failed to split meta namespace cache key", "cacheKey", key) return err } startTime := time.Now() logger.V(4).Info("Started syncing deployment", "deployment", klog.KRef(namespace, name), "startTime", startTime) defer func() { logger.V(4).Info("Finished syncing deployment", "deployment", klog.KRef(namespace, name), "duration", time.Since(startTime)) }() deployment, err := dc.dLister.Deployments(namespace).Get(name)
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 24.2K bytes - Viewed (0) -
pkg/kube/multicluster/secretcontroller.go
configCluster := "syncing" if kube.AllSynced(c.configClusterSyncers) { configCluster = "synced" } out := []cluster.DebugInfo{{ ID: c.configClusterID, SyncStatus: configCluster, }} // Append each cluster derived from secrets for secretName, clusters := range c.cs.All() { for clusterID, c := range clusters { syncStatus := "syncing" if c.Closed() {
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Wed Mar 06 02:13:10 UTC 2024 - 12.7K bytes - Viewed (0) -
pkg/controller/disruption/disruption.go
} utilruntime.HandleError(fmt.Errorf("error syncing Pod %v to clear DisruptionTarget condition, requeueing: %w", key, err)) dc.stalePodDisruptionQueue.AddRateLimited(key) return true } func (dc *DisruptionController) sync(ctx context.Context, key string) error { logger := klog.FromContext(ctx) startTime := dc.clock.Now() defer func() {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 36.1K bytes - Viewed (0) -
pkg/controller/garbagecollector/garbagecollector.go
// after the next discovery sync. // For now, record the error and retry. logger.V(5).Error(err, "error syncing item", "item", n.identity) } else { utilruntime.HandleError(fmt.Errorf("error syncing item %s: %v", n, err)) } // retry if garbage collection of an object failed. return requeueItem } else if !n.isObserved() {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 32.9K bytes - Viewed (0) -
pilot/pkg/serviceregistry/kube/controller/network.go
if err := c.syncPods(); err != nil { log.Errorf("one or more errors force-syncing pods: %v", err) } if err := c.endpoints.initializeNamespace(metav1.NamespaceAll, true); err != nil { log.Errorf("one or more errors force-syncing endpoints: %v", err) } c.reloadNetworkGateways()
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Fri May 31 22:23:22 UTC 2024 - 15.4K bytes - Viewed (0) -
pkg/controller/nodeipam/ipam/range_allocator.go
// Foo resource to be synced. if err := r.syncNode(ctx, key); err != nil { // Put the item back on the queue to handle any transient errors. r.queue.AddRateLimited(key) return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) } // Finally, if no error occurs we Forget this item so it does not // get queue again until another change happens. r.queue.Forget(obj)
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Apr 24 10:06:15 UTC 2024 - 16.2K bytes - Viewed (0) -
staging/src/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_source.go
// listing all policies at all, we would want to wipe the list. s.policies.Store(&policies) if err != nil { // An error was generated while syncing policies. Mark it as dirty again // so we can retry later utilruntime.HandleError(fmt.Errorf("encountered error syncing policies: %w. Rescheduling policy sync", err)) s.notify() } } func (s *policySource[P, B, E]) notify() { s.policiesDirty.Store(true) }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Feb 21 23:07:34 UTC 2024 - 15.2K bytes - Viewed (0) -
pkg/registry/core/service/ipallocator/controller/repairip.go
} func (r *RepairIPAddress) handleSvcErr(err error, key string) { if err == nil { r.svcQueue.Forget(key) return } if r.svcQueue.NumRequeues(key) < maxRetries { klog.V(2).InfoS("Error syncing Service, retrying", "service", key, "err", err) r.svcQueue.AddRateLimited(key) return } klog.Warningf("Dropping Service %q out of the queue: %v", key, err) r.svcQueue.Forget(key)
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 24.7K bytes - Viewed (0) -
docs/bucket/replication/README.md
To disable replica metadata modification syncing, use `mc replicate edit` with the --replicate flag. ``` mc replicate edit alias/bucket --id xyz.id --replicate "delete,delete-marker" ``` To re-enable replica metadata modification syncing, ``` mc replicate edit alias/bucket --id xyz.id --replicate "delete,delete-marker,replica-metadata-sync" ```
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Tue Jan 24 23:46:33 UTC 2023 - 18.2K bytes - Viewed (0) -
pkg/controller/certificates/signer/signer.go
ctx, controllerName, client, csrInformer, signer.handle, ), dynamicCertReloader: signer.caProvider.caLoader, }, nil } // Run the main goroutine responsible for watching and syncing jobs. func (c *CSRSigningController) Run(ctx context.Context, workers int) { go c.dynamicCertReloader.Run(ctx, workers) c.certificateController.Run(ctx, workers) }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Mar 15 03:26:08 UTC 2023 - 10.3K bytes - Viewed (0)