Results 1 - 7 of 7 for HandleCrash (0.43 sec)

pkg/kubelet/kuberuntime/kuberuntime_container.go

    start := metav1.Now()
    done := make(chan struct{})
    go func() {
        defer close(done)
        defer utilruntime.HandleCrash()
        if _, err := m.runner.Run(ctx, containerID, pod, containerSpec, containerSpec.Lifecycle.PreStop); err != nil {
            klog.ErrorS(err, "PreStop hook failed", "pod", klog.KObj(pod), "podUID", pod.UID,

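The kubelet runs the PreStop hook on its own goroutine so it can enforce a timeout, and guards it with HandleCrash, which by default logs a recovered panic through the registered panic handlers and then re-panics. A minimal sketch of the same shape, with a hypothetical runHook standing in for m.runner.Run:

    package main

    import (
        "fmt"
        "time"

        utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    )

    // runHook is a hypothetical stand-in for m.runner.Run.
    func runHook() error {
        time.Sleep(100 * time.Millisecond)
        return nil
    }

    func main() {
        done := make(chan struct{})
        go func() {
            // Defers run last-in-first-out: HandleCrash, registered last,
            // runs first and logs any panic; close(done) still runs during
            // unwinding, so the select below is not left waiting.
            defer close(done)
            defer utilruntime.HandleCrash()
            if err := runHook(); err != nil {
                fmt.Println("hook failed:", err)
            }
        }()

        select {
        case <-done:
            fmt.Println("hook finished")
        case <-time.After(2 * time.Second):
            fmt.Println("hook timed out")
        }
    }
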
staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go

    workersExited := make(chan struct{})
    wg.Add(workersNumber)
    for i := 0; i < workersNumber; i++ {
        go func() {
            // panics don't cross goroutine boundaries
            defer utilruntime.HandleCrash(func(panicReason interface{}) {
                errs <- fmt.Errorf("DeleteCollection goroutine panicked: %v", panicReason)
            })
            defer wg.Done()
            for item := range toProcess {

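This hit shows HandleCrash's variadic form: panics do not cross goroutine boundaries, so each DeleteCollection worker installs an additional handler that forwards the panic reason onto an error channel for the caller to report. A minimal sketch of that wiring, with the work items invented for illustration:

    package main

    import (
        "fmt"
        "sync"

        utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    )

    func main() {
        const workersNumber = 3
        toProcess := make(chan int)
        errs := make(chan error, workersNumber) // buffered: a panicking worker must not block
        var wg sync.WaitGroup

        wg.Add(workersNumber)
        for i := 0; i < workersNumber; i++ {
            go func() {
                // HandleCrash logs a panic via the default handlers, runs this
                // extra handler, and then re-panics unless configured otherwise.
                defer utilruntime.HandleCrash(func(panicReason interface{}) {
                    errs <- fmt.Errorf("worker panicked: %v", panicReason)
                })
                defer wg.Done()
                for item := range toProcess {
                    _ = item * item // stand-in for real work
                }
            }()
        }

        for i := 0; i < 9; i++ {
            toProcess <- i
        }
        close(toProcess)
        wg.Wait()

        select {
        case err := <-errs:
            fmt.Println("failed:", err)
        default:
            fmt.Println("all workers exited cleanly")
        }
    }
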
pkg/controller/daemon/daemon_controller.go

        dsc.queue.Add(key)
    }

    // Run begins watching and syncing daemon sets.
    func (dsc *DaemonSetsController) Run(ctx context.Context, workers int) {
        defer utilruntime.HandleCrash()

        dsc.eventBroadcaster.StartStructuredLogging(3)
        dsc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dsc.kubeClient.CoreV1().Events("")})
        defer dsc.eventBroadcaster.Shutdown()

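This is the stock controller entry-point pattern, and the node lifecycle, HPA, and job controller hits below open the same way: HandleCrash is deferred first in Run, so it runs last on unwind and sees panics raised by the body or by any later defer. A stripped-down sketch of the shape, with the controller itself hypothetical:

    package main

    import (
        "context"
        "fmt"
        "time"

        utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    )

    // Controller is a hypothetical stand-in for DaemonSetsController.
    type Controller struct{}

    func (c *Controller) Run(ctx context.Context, workers int) {
        // First defer, so it is the last to execute on unwind and can
        // log a panic from anywhere in startup, shutdown, or the body.
        defer utilruntime.HandleCrash()

        fmt.Println("starting controller with", workers, "workers")
        defer fmt.Println("shutting down controller")

        <-ctx.Done() // block until asked to stop
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()
        (&Controller{}).Run(ctx, 2)
    }
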
pkg/controller/nodelifecycle/node_lifecycle_controller.go

        return nc, nil
    }

    // Run starts an asynchronous loop that monitors the status of cluster nodes.
    func (nc *Controller) Run(ctx context.Context) {
        defer utilruntime.HandleCrash()

        // Start events processing pipeline.
        nc.broadcaster.StartStructuredLogging(3)
        logger := klog.FromContext(ctx)
        logger.Info("Sending events to api server")
        nc.broadcaster.StartRecordingToSink(

pkg/controller/podautoscaler/horizontal.go

        monitor.Register()
        return hpaController
    }

    // Run begins watching and syncing.
    func (a *HorizontalController) Run(ctx context.Context, workers int) {
        defer utilruntime.HandleCrash()
        defer a.queue.ShutDown()

        logger := klog.FromContext(ctx)
        logger.Info("Starting HPA controller")
        defer logger.Info("Shutting down HPA controller")

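The HPA variant pairs HandleCrash with the work queue's shutdown: defers run last-in-first-out, so a.queue.ShutDown() executes before HandleCrash on the way out and releases workers blocked in Get. A small sketch of that ordering, using client-go's workqueue package:

    package main

    import (
        "fmt"

        utilruntime "k8s.io/apimachinery/pkg/util/runtime"
        "k8s.io/client-go/util/workqueue"
    )

    func main() {
        queue := workqueue.New()

        done := make(chan struct{})
        go func() {
            defer close(done)
            for {
                item, shutdown := queue.Get()
                if shutdown {
                    return // ShutDown below unblocks Get with shutdown == true
                }
                fmt.Println("processing", item)
                queue.Done(item)
            }
        }()

        func() {
            // Mirrors the HPA ordering: on unwind the queue shuts down
            // before HandleCrash gets to inspect any panic.
            defer utilruntime.HandleCrash()
            defer queue.ShutDown()

            queue.Add("demo-item")
        }()

        <-done
    }
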
pkg/controller/job/job_controller.go

        metrics.Register()
        return jm, nil
    }

    // Run the main goroutine responsible for watching and syncing jobs.
    func (jm *Controller) Run(ctx context.Context, workers int) {
        defer utilruntime.HandleCrash()
        logger := klog.FromContext(ctx)

        // Start events processing pipeline.
        jm.broadcaster.StartStructuredLogging(3)

pkg/kubelet/pod_workers.go

        } else {
            outCh = podUpdates
        }

        // spawn a pod worker
        go func() {
            // TODO: this should be a wait.Until with backoff to handle panics, and
            // accept a context for shutdown
            defer runtime.HandleCrash()
            defer klog.V(3).InfoS("Pod worker has stopped", "podUID", uid)
            p.podWorkerLoop(uid, outCh)
        }()
    }

    // measure the maximum latency between a call to UpdatePod and when the pod worker reacts to it

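The TODO refers to wait.Until from k8s.io/apimachinery/pkg/util/wait, which re-runs its body on a fixed period until the stop channel closes and guards each invocation with runtime.HandleCrash internally. A minimal sketch of a worker loop written that way:

    package main

    import (
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
        stopCh := make(chan struct{})
        go func() {
            time.Sleep(3 * time.Second)
            close(stopCh) // stands in for a shutdown signal
        }()

        // Runs the body, waits a second, and repeats until stopCh is closed.
        wait.Until(func() {
            fmt.Println("pod worker tick at", time.Now().Format(time.RFC3339))
        }, time.Second, stopCh)
    }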