Results 1 - 10 of 24 for workerCh (0.14 sec)
src/runtime/mgc.go
// and there are no more workers (note that, since this is
// concurrent, this may be a transient state, but mark
// termination will clean it up). Between background workers
// and assists, we don't really know how many workers there
// will be, so we pretend to have an arbitrarily large number
// of workers, almost all of which are "waiting". While a
// worker is working it decrements nwait. If nproc == nwait,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 62K bytes - Viewed (0)
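The nwait/nproc accounting described in the mgc.go excerpt can be illustrated with a small, self-contained sketch. This is not the runtime's code: the markWork type, its channel-fed workers, and the squaring stand-in for mark work are all hypothetical; the real runtime keeps these counters on its GC state and coordinates with assists as well.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// markWork is a hypothetical stand-in for the excerpt's accounting: nproc is
// the pretended (arbitrarily large) number of workers, nwait counts how many
// of them are currently "waiting".
type markWork struct {
	nproc int64
	nwait int64
	queue chan int
	wg    sync.WaitGroup
}

func (m *markWork) worker() {
	defer m.wg.Done()
	for item := range m.queue {
		atomic.AddInt64(&m.nwait, -1) // working: no longer waiting
		_ = item * item               // stand-in for real marking work
		atomic.AddInt64(&m.nwait, 1)  // done with this item: waiting again
	}
}

func main() {
	m := &markWork{nproc: 1 << 20, queue: make(chan int, 8)}
	m.nwait = m.nproc // almost all pretend workers start out "waiting"

	for i := 0; i < 4; i++ { // only a few real goroutines back the pretend pool
		m.wg.Add(1)
		go m.worker()
	}
	for i := 0; i < 100; i++ {
		m.queue <- i
	}
	close(m.queue)
	m.wg.Wait()

	// With no work left and nobody working, nwait == nproc again, which is
	// the condition the excerpt uses to detect that marking may be finished.
	fmt.Println("all waiting:", atomic.LoadInt64(&m.nwait) == m.nproc)
}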
pkg/kubelet/pod_workers_test.go
}
stillWorking := false
// ignore held workers
w.lock.Lock()
for uid := range w.holds {
	pausedWorkers[uid] = struct{}{}
}
w.lock.Unlock()
// check for at least one still working non-paused worker
w.w.podLock.Lock()
for uid, worker := range w.w.podSyncStatuses {
	if _, ok := pausedWorkers[uid]; ok {
		continue
	}
	if worker.working {
		stillWorking = true
		break
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Jun 04 06:25:43 UTC 2024 - 75.6K bytes - Viewed (0)
.teamcity/test-buckets.json
"ivy", "testing-native", "maven", "integ-test" ] }, { "parallelizationMethod":{ "name":"TestDistribution" }, "subprojects":[ "workers", "model-core", "language-groovy", "build-init", "logging", "plugins-groovy", "kotlin-dsl", "plugins-java", "samples", "plugin-development",
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Mon Jun 10 15:56:44 UTC 2024 - 54.2K bytes - Viewed (0) -
cmd/batch-handlers.go
// newBatchJobPool creates a pool of job manifest workers of specified size
func newBatchJobPool(ctx context.Context, o ObjectLayer, workers int) *BatchJobPool {
	jpool := &BatchJobPool{
		ctx:          ctx,
		objLayer:     o,
		jobCh:        make(chan *BatchJobRequest, 10000),
		workerKillCh: make(chan struct{}, workers),
		jobCancelers: make(map[string]context.CancelFunc),
	}
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Tue Jun 11 03:13:30 UTC 2024 - 56K bytes - Viewed (0)
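The newBatchJobPool excerpt shows the shape MinIO sets up: a large buffered job channel plus a workerKillCh sized to the worker count, so the pool can later be shrunk by signalling individual workers to exit. Below is a minimal sketch of that pattern under simplified assumptions; jobPool, newJobPool, and the plain int job type are illustrative stand-ins, not MinIO's API.

package main

import (
	"context"
	"fmt"
	"sync"
)

// jobPool is a hypothetical, simplified analogue of BatchJobPool: jobCh feeds
// the workers, workerKillCh carries one "please exit" signal per worker we
// might want to retire.
type jobPool struct {
	jobCh        chan int
	workerKillCh chan struct{}
	wg           sync.WaitGroup
}

func newJobPool(ctx context.Context, workers int) *jobPool {
	p := &jobPool{
		jobCh:        make(chan int, 10000),
		workerKillCh: make(chan struct{}, workers),
	}
	for i := 0; i < workers; i++ {
		p.wg.Add(1)
		go p.worker(ctx)
	}
	return p
}

func (p *jobPool) worker(ctx context.Context) {
	defer p.wg.Done()
	for {
		select {
		case <-ctx.Done():
			return
		case <-p.workerKillCh: // the pool asked one worker to exit
			return
		case job, ok := <-p.jobCh:
			if !ok {
				return
			}
			fmt.Println("processing job", job)
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	p := newJobPool(ctx, 4)
	for i := 0; i < 8; i++ {
		p.jobCh <- i
	}
	p.workerKillCh <- struct{}{} // shrink the pool by one worker
	close(p.jobCh)
	p.wg.Wait()
}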
pkg/controller/nodelifecycle/node_lifecycle_controller.go
nodeNameKeyIndex = "spec.nodeName"
// podUpdateWorkerSizes assumes that in most cases pod will be handled by monitorNodeHealth pass.
// Pod update workers will only handle lagging cache pods. 4 workers should be enough.
podUpdateWorkerSize = 4
// nodeUpdateWorkerSize defines the size of workers for node update or/and pod update.
nodeUpdateWorkerSize = 8
// taintEvictionController is defined here in order to prevent imports of
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 51.6K bytes - Viewed (0)
src/runtime/mgcmark.go
// mark workers in retake. That might be simpler than trying to
// enumerate all the reasons why we might want to preempt, even
// if we're supposed to be mostly non-preemptible.
for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
	// Try to keep work available on the global queue. We used to
	// check if there were waiting workers, but it's better to
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 18 21:25:11 UTC 2024 - 52.5K bytes - Viewed (0)
pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go
	PodResourceClaims(v1.PodResourceClaim{Name: resourceName2, Source: v1.ClaimSource{ResourceClaimName: &claimName2}}).
	Obj()
workerNode      = &st.MakeNode().Name("worker").Label("kubernetes.io/hostname", "worker").Node
workerNodeSlice = st.MakeResourceSlice("worker", "some-driver").NamedResourcesInstances("instance-1").Obj()
claimParameters = st.MakeClaimParameters().Name(claimName).Namespace(namespace).
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon May 27 07:57:10 UTC 2024 - 61.9K bytes - Viewed (0)
pkg/controller/podautoscaler/horizontal.go
func (a *HorizontalController) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()
	defer a.queue.ShutDown()

	logger := klog.FromContext(ctx)
	logger.Info("Starting HPA controller")
	defer logger.Info("Shutting down HPA controller")

	if !cache.WaitForNamedCacheSync("HPA", ctx.Done(), a.hpaListerSynced, a.podListerSynced) {
		return
	}

	for i := 0; i < workers; i++ {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 63.6K bytes - Viewed (0)
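HorizontalController.Run in the excerpt follows the usual controller pattern: wait for informer caches to sync, then start a fixed number of workers that loop until the context is cancelled. The sketch below mirrors that shape with only the standard library; controller, waitForCacheSync, and processNextWorkItem are hypothetical stand-ins for the Kubernetes helpers (cache.WaitForNamedCacheSync, wait.UntilWithContext, the rate-limited workqueue) the real code uses.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// controller is a hypothetical, stripped-down analogue of HorizontalController:
// a buffered work source plus a Run method that fans out a fixed number of workers.
type controller struct {
	queue chan string
}

// Run mirrors the excerpt's shape: wait until "caches" are synced, then start
// the requested number of workers and return only when all of them have stopped.
func (c *controller) Run(ctx context.Context, workers int) {
	if !c.waitForCacheSync(ctx) {
		return
	}
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for c.processNextWorkItem(ctx) {
			}
		}()
	}
	wg.Wait()
}

// waitForCacheSync stands in for cache.WaitForNamedCacheSync: simulate a short
// warm-up while honouring cancellation.
func (c *controller) waitForCacheSync(ctx context.Context) bool {
	select {
	case <-ctx.Done():
		return false
	case <-time.After(50 * time.Millisecond):
		return true
	}
}

// processNextWorkItem handles one key, or reports false once the context ends.
func (c *controller) processNextWorkItem(ctx context.Context) bool {
	select {
	case <-ctx.Done():
		return false
	case key := <-c.queue:
		fmt.Println("reconciling", key)
		return true
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()

	c := &controller{queue: make(chan string, 16)}
	for _, name := range []string{"default/web", "default/api", "default/worker"} {
		c.queue <- name
	}
	c.Run(ctx, 2) // returns once the context expires and both workers stop
}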
docs/metrics/prometheus/grafana/replication/minio-replication-node.json
"interval": "1m", "intervalFactor": 2, "legendFormat": "{{server}}", "refId": "A" } ], "title": "Avg. Active Workers", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": {
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Thu Jun 13 22:26:54 UTC 2024 - 57.4K bytes - Viewed (0) -
pkg/controller/job/job_controller.go
		return
	}
	jm.orphanQueue.Add(key)
}

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (jm *Controller) worker(ctx context.Context) {
	for jm.processNextWorkItem(ctx) {
	}
}
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon Jun 10 23:56:37 UTC 2024 - 77.6K bytes - Viewed (0)
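The comment on Controller.worker states the workqueue contract the loop relies on: items are dequeued, processed, and marked done, and the same key is never handed to two workers at once. As a rough sketch of how that guarantee can be provided, here is a toy queue with dirty/processing bookkeeping; it is an assumption-laden stand-in for k8s.io/client-go/util/workqueue, not its implementation.

package main

import (
	"fmt"
	"sync"
)

// keyQueue is a toy approximation of the workqueue semantics in the excerpt:
// Add dedupes keys, Get hands a key to exactly one worker, and a key re-added
// while in flight is redelivered only after Done.
type keyQueue struct {
	mu         sync.Mutex
	cond       *sync.Cond
	queue      []string
	dirty      map[string]bool // queued, or re-queued while being processed
	processing map[string]bool // currently held by a worker
	shutdown   bool
}

func newKeyQueue() *keyQueue {
	q := &keyQueue{dirty: map[string]bool{}, processing: map[string]bool{}}
	q.cond = sync.NewCond(&q.mu)
	return q
}

func (q *keyQueue) Add(key string) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if q.shutdown || q.dirty[key] {
		return
	}
	q.dirty[key] = true
	if !q.processing[key] { // never hand out a key that is already in flight
		q.queue = append(q.queue, key)
		q.cond.Signal()
	}
}

func (q *keyQueue) Get() (string, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	for len(q.queue) == 0 && !q.shutdown {
		q.cond.Wait()
	}
	if len(q.queue) == 0 {
		return "", false
	}
	key := q.queue[0]
	q.queue = q.queue[1:]
	delete(q.dirty, key)
	q.processing[key] = true
	return key, true
}

func (q *keyQueue) Done(key string) {
	q.mu.Lock()
	defer q.mu.Unlock()
	delete(q.processing, key)
	if q.dirty[key] { // re-added while in flight: deliver it again now
		q.queue = append(q.queue, key)
		q.cond.Signal()
	}
}

func (q *keyQueue) ShutDown() {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.shutdown = true
	q.cond.Broadcast()
}

// worker mirrors the excerpt's loop: dequeue, process, mark done, repeat.
func worker(q *keyQueue, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		key, ok := q.Get()
		if !ok {
			return
		}
		fmt.Println("sync", key)
		q.Done(key)
	}
}

func main() {
	q := newKeyQueue()
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go worker(q, &wg)
	}
	for _, key := range []string{"default/job-a", "default/job-b", "default/job-a"} {
		q.Add(key)
	}
	q.ShutDown()
	wg.Wait()
}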