Results 11 - 20 of 33 for workerCh (0.26 sec)
src/runtime/mgcmark.go
// mark workers in retake. That might be simpler than trying to
// enumerate all the reasons why we might want to preempt, even
// if we're supposed to be mostly non-preemptible.
for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
	// Try to keep work available on the global queue. We used to
	// check if there were waiting workers, but it's better to
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 18 21:25:11 UTC 2024 - 52.5K bytes - Viewed (0)
pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go
	PodResourceClaims(v1.PodResourceClaim{Name: resourceName2, Source: v1.ClaimSource{ResourceClaimName: &claimName2}}).
	Obj()
workerNode      = &st.MakeNode().Name("worker").Label("kubernetes.io/hostname", "worker").Node
workerNodeSlice = st.MakeResourceSlice("worker", "some-driver").NamedResourcesInstances("instance-1").Obj()
claimParameters = st.MakeClaimParameters().Name(claimName).Namespace(namespace).
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon May 27 07:57:10 UTC 2024 - 61.9K bytes - Viewed (0)
staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go
	case <-workersExited:
		klog.V(4).InfoS("workers already exited, and there are some items waiting to be processed", "queued/finished", i, "total", processedItems+len(newItems))
		// Try to propagate an error from the workers if possible.
		select {
		case err := <-errs:
			return nil, err
		default:
			return nil, fmt.Errorf("all DeleteCollection workers exited")
		}
	}
}
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Fri Jan 19 23:22:44 UTC 2024 - 60.8K bytes - Viewed (0)
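The snippet above shows the tail of a fan-out pattern: a closed workersExited channel signals that every worker goroutine is gone, and a buffered errs channel carries the first failure so it can be propagated to the caller. A minimal, self-contained sketch of that shape (channel names borrowed from the snippet; the real store.go wires these into DeleteCollection, this is not its exact code):

package main

import (
	"errors"
	"fmt"
	"sync"
)

func main() {
	items := []string{"a", "b", "c"}
	toProcess := make(chan string, len(items))
	errs := make(chan error, 1) // keep the first error, drop the rest
	workersExited := make(chan struct{})

	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for item := range toProcess {
				if item == "b" { // simulate a failed deletion
					select {
					case errs <- errors.New("delete failed: " + item):
					default: // errs already holds an error
					}
					return // worker bails out on error
				}
			}
		}()
	}
	go func() {
		wg.Wait()
		close(workersExited) // mirrors the snippet's exit signal
	}()

	for _, it := range items {
		toProcess <- it
	}
	close(toProcess)

	<-workersExited
	// Try to propagate an error from the workers if possible.
	select {
	case err := <-errs:
		fmt.Println("error:", err)
	default:
		fmt.Println("all workers exited cleanly")
	}
}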
pkg/controller/podautoscaler/horizontal.go
func (a *HorizontalController) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()
	defer a.queue.ShutDown()

	logger := klog.FromContext(ctx)
	logger.Info("Starting HPA controller")
	defer logger.Info("Shutting down HPA controller")

	if !cache.WaitForNamedCacheSync("HPA", ctx.Done(), a.hpaListerSynced, a.podListerSynced) {
		return
	}

	for i := 0; i < workers; i++ {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 63.6K bytes - Viewed (0)
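The snippet is truncated at the worker-spawning loop. In Kubernetes controllers that loop conventionally continues by launching each worker with wait.UntilWithContext from k8s.io/apimachinery/pkg/util/wait, then blocking until the context is cancelled (a sketch of the usual shape, not necessarily the exact HPA source):

	for i := 0; i < workers; i++ {
		// Re-run a.worker every second until ctx is cancelled; a.worker
		// itself loops over the queue, so this mainly restarts it if it returns.
		go wait.UntilWithContext(ctx, a.worker, time.Second)
	}

	<-ctx.Done()
}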
docs/metrics/prometheus/grafana/replication/minio-replication-node.json
"interval": "1m", "intervalFactor": 2, "legendFormat": "{{server}}", "refId": "A" } ], "title": "Avg. Active Workers", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": {
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Thu Jun 13 22:26:54 UTC 2024 - 57.4K bytes - Viewed (0) -
pkg/controller/job/job_controller.go
		return
	}
	jm.orphanQueue.Add(key)
}

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (jm *Controller) worker(ctx context.Context) {
	for jm.processNextWorkItem(ctx) {
	}
}
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon Jun 10 23:56:37 UTC 2024 - 77.6K bytes - Viewed (0)
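The comment describes the standard client-go workqueue loop: worker drains the queue until shutdown, and the queue guarantees a given key is handed to only one worker at a time. A sketch of the processNextWorkItem half of that contract, assuming jm.queue is a client-go rate-limiting workqueue and jm.syncHandler is the per-key sync function (names mirror the snippet, not necessarily the exact job controller source):

func (jm *Controller) processNextWorkItem(ctx context.Context) bool {
	key, quit := jm.queue.Get()
	if quit {
		return false // queue is shutting down, stop the worker loop
	}
	// Done tells the queue this key may be handed out again. Until then,
	// concurrent Adds of the same key are deduplicated, which is what
	// keeps syncHandler from running concurrently for one key.
	defer jm.queue.Done(key)

	if err := jm.syncHandler(ctx, key); err != nil {
		jm.queue.AddRateLimited(key) // retry later with backoff
	} else {
		jm.queue.Forget(key) // success: reset the retry counter
	}
	return true
}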
src/runtime/pprof/pprof_test.go
for _, tc := range []struct {
	name    string
	workers int
}{
	{
		name:    "serial",
		workers: 1,
	},
	{
		name:    "parallel",
		workers: runtime.GOMAXPROCS(0),
	},
} {
	// check that the OS's perspective matches what the Go runtime measures.
	t.Run(tc.name, func(t *testing.T) {
		t.Logf("Running with %d workers", tc.workers)

		var userTime, systemTime time.Duration
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 18:42:28 UTC 2024 - 68.8K bytes - Viewed (0)
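Getting "the OS's perspective" on CPU time means asking the kernel rather than the Go runtime. One way to do that on Unix is getrusage(2); a self-contained sketch of that approach (the test itself may gather these numbers differently, and this is platform-specific):

package main

import (
	"fmt"
	"syscall"
	"time"
)

// processCPUTimes returns the user and system CPU time the kernel has
// accounted to this process so far.
func processCPUTimes() (user, system time.Duration, err error) {
	var ru syscall.Rusage
	if err := syscall.Getrusage(syscall.RUSAGE_SELF, &ru); err != nil {
		return 0, 0, err
	}
	toDur := func(tv syscall.Timeval) time.Duration {
		return time.Duration(tv.Sec)*time.Second + time.Duration(tv.Usec)*time.Microsecond
	}
	return toDur(ru.Utime), toDur(ru.Stime), nil
}

func main() {
	user, system, err := processCPUTimes()
	if err != nil {
		panic(err)
	}
	fmt.Println("user:", user, "system:", system)
}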
pkg/controller/volume/attachdetach/reconciler/reconciler_test.go
podName1 := "pod-uid1" volumeName1 := v1.UniqueVolumeName("volume-name1") volumeSpec1 := controllervolumetesting.GetTestVolumeSpec(string(volumeName1), volumeName1) nodeName1 := k8stypes.NodeName("worker-0") node1 := &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: string(nodeName1)}, Spec: v1.NodeSpec{ Taints: []v1.Taint{{Key: v1.TaintNodeOutOfService, Effect: v1.TaintEffectNoExecute}}, }, }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu Apr 18 07:00:14 UTC 2024 - 72.8K bytes - Viewed (0) -
platforms/documentation/docs/src/docs/userguide/optimizing-performance/configuration_cache.adoc
To work around this, you can use the <<worker_api.adoc#tasks_parallel_worker, Worker API>> with classloader or process isolation to encapsulate the library code. The bytecode of the worker's classpath is not modified, so the self-checks should pass. When process isolation is used, the worker action is executed in a separate worker process that doesn't have the Gradle Java agent installed.
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Fri Mar 29 16:24:12 UTC 2024 - 71.1K bytes - Viewed (0)
.bazelrc
build:rbe_base --jobs=800
build:rbe_base --remote_executor=grpcs://remotebuildexecution.googleapis.com
build:rbe_base --remote_timeout=3600
build:rbe_base --spawn_strategy=remote,worker,standalone,local
# Attempt to minimize the amount of data transfer between bazel and the remote
# workers:
build:rbe_base --remote_download_toplevel
test:rbe_base --test_env=USER=anon
# TODO(kanglan): Check if we want to merge rbe_linux into rbe_linux_cpu.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 17:12:54 UTC 2024 - 52.9K bytes - Viewed (0)