Results 11 - 20 of 133 for Await (0.12 sec)

  1. pkg/controller/replicaset/replica_set_test.go

    	}
    	// Start only the ReplicaSet watcher and the workqueue, send a watch event,
    	// and make sure it hits the sync method.
    	go wait.UntilWithContext(tCtx, manager.worker, 10*time.Millisecond)
    
    	testRSSpec.Name = "foo"
    	fakeWatch.Add(&testRSSpec)
    
    	select {
    	case <-received:
    	case <-time.After(wait.ForeverTestTimeout):
    		t.Errorf("unexpected timeout from result channel")
    	}
    }
    
    func TestWatchPods(t *testing.T) {
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Sat May 04 18:33:12 UTC 2024
    - 69.2K bytes
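
    The excerpt pairs a periodic worker (wait.UntilWithContext, from k8s.io/apimachinery/pkg/util/wait) with a select guarded by wait.ForeverTestTimeout (30 seconds). A stdlib-only sketch of the same shape; the names worker and received are hypothetical:

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    // worker polls on a short interval until its context is cancelled,
    // signalling each pass on received.
    func worker(ctx context.Context, received chan<- struct{}) {
    	ticker := time.NewTicker(10 * time.Millisecond)
    	defer ticker.Stop()
    	for {
    		select {
    		case <-ctx.Done():
    			return
    		case <-ticker.C:
    			received <- struct{}{}
    		}
    	}
    }

    func main() {
    	ctx, cancel := context.WithCancel(context.Background())
    	defer cancel()

    	received := make(chan struct{})
    	go worker(ctx, received)

    	// Mirror the excerpt's guard: report failure if nothing arrives
    	// within a generous timeout.
    	select {
    	case <-received:
    		fmt.Println("received event")
    	case <-time.After(30 * time.Second):
    		fmt.Println("unexpected timeout from result channel")
    	}
    }
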
  2. src/net/dnsclient_unix_test.go

    					A: TestAddr,
    				},
    			},
    		}
    	}
    	return r, nil
    }}
    
    // Issue 13705: don't try to resolve onion addresses, etc
    func TestLookupTorOnion(t *testing.T) {
    	defer dnsWaitGroup.Wait()
    	r := Resolver{PreferGo: true, Dial: fakeDNSServerSuccessful.DialContext}
    	addrs, err := r.LookupIPAddr(context.Background(), "foo.onion.")
    	if err != nil {
    		t.Fatalf("lookup = %v; want nil", err)
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sun Apr 14 18:23:45 UTC 2024
    - 72.4K bytes
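
    The Resolver here is net.Resolver exercised through a test-only fake (fakeDNSServerSuccessful). The same two public fields, PreferGo and Dial, let real code route every lookup through a dialer of its own; the pinned server address below is an arbitrary example:

    package main

    import (
    	"context"
    	"fmt"
    	"net"
    	"time"
    )

    func main() {
    	r := &net.Resolver{
    		PreferGo: true, // use Go's built-in resolver, not the system one
    		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
    			// Ignore the resolver's chosen address and pin one server.
    			d := net.Dialer{Timeout: 5 * time.Second}
    			return d.DialContext(ctx, network, "8.8.8.8:53")
    		},
    	}
    	addrs, err := r.LookupIPAddr(context.Background(), "example.com")
    	if err != nil {
    		fmt.Println("lookup failed:", err)
    		return
    	}
    	fmt.Println(addrs)
    }
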
  3. cmd/erasure-server-pool.go

    		g.Go(func() error {
    			// Get the set where it would be placed.
    			storageInfos[index] = getDiskInfos(ctx, pool.getHashedSet(object).getDisks()...)
    			return nil
    		}, index)
    	}
    
    	// Wait for the go routines.
    	g.Wait()
    
    	for i, zinfo := range storageInfos {
    		if zinfo == nil {
    			serverPools[i] = poolAvailableSpace{Index: i}
    			continue
    		}
    		var available uint64
    		if !isMinioMetaBucketName(bucket) {
    Registered: Sun Jun 16 00:44:34 UTC 2024
    - Last Modified: Thu May 30 11:58:12 UTC 2024
    - 82.5K bytes
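
    g here is MinIO's internal errgroup variant: its Go takes a slot index and its Wait returns a per-slot error slice (see errgroup.WithNErrs in result 6). With golang.org/x/sync/errgroup the same indexed fan-out can be sketched as follows; the inputs are illustrative:

    package main

    import (
    	"fmt"

    	"golang.org/x/sync/errgroup"
    )

    func main() {
    	inputs := []string{"disk-0", "disk-1", "disk-2"}
    	infos := make([]string, len(inputs)) // one slot per goroutine, so no mutex needed

    	var g errgroup.Group
    	for index, in := range inputs {
    		index, in := index, in // capture loop variables (pre-Go 1.22 semantics)
    		g.Go(func() error {
    			infos[index] = "info(" + in + ")"
    			return nil
    		})
    	}
    	// Wait blocks until every goroutine returns, yielding the first non-nil error.
    	if err := g.Wait(); err != nil {
    		fmt.Println("fan-out failed:", err)
    		return
    	}
    	fmt.Println(infos)
    }
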
  4. src/runtime/mgcmark.go

    	startTime := nanotime()
    	trackLimiterEvent := gp.m.p.ptr().limiterEvent.start(limiterEventMarkAssist, startTime)
    
    	decnwait := atomic.Xadd(&work.nwait, -1)
    	if decnwait == work.nproc {
    		println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
    		throw("nwait > work.nprocs")
    	}
    
    	// gcDrainN requires the caller to be preemptible.
    	casGToWaitingForGC(gp, _Grunning, waitReasonGCAssistMarking)
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Apr 18 21:25:11 UTC 2024
    - 52.5K bytes
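
    The decrement-and-check uses the runtime's internal atomic.Xadd: if the idle-worker count equals work.nproc after being decremented, it must have exceeded the number of workers beforehand, which should be impossible. The same invariant check, sketched with the public sync/atomic package:

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    func main() {
    	const nproc = 4         // total workers
    	var nwait int64 = nproc // all workers start idle

    	// A worker leaving the idle set decrements nwait atomically.
    	decnwait := atomic.AddInt64(&nwait, -1)
    	if decnwait == nproc {
    		// Reachable only if the counter was corrupted: it would have
    		// had to be nproc+1 before the decrement.
    		panic("nwait > nproc")
    	}
    	fmt.Println("idle workers now:", decnwait)
    }
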
  5. pkg/kubelet/pod_workers.go

    		// No error; requeue at the regular resync interval.
    		p.workQueue.Enqueue(podUID, wait.Jitter(p.resyncInterval, workerResyncIntervalJitterFactor))
    	case strings.Contains(syncErr.Error(), NetworkNotReadyErrorMsg):
    		// Network is not ready; back off for short period of time and retry as network might be ready soon.
    		p.workQueue.Enqueue(podUID, wait.Jitter(backOffOnTransientErrorPeriod, workerBackOffPeriodJitterFactor))
    	default:
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Tue Apr 02 13:22:37 UTC 2024
    - 74.8K bytes
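
    wait.Jitter (also from k8s.io/apimachinery/pkg/util/wait) returns a duration in [d, d + maxFactor*d), so requeued pods do not resync in lockstep. A stdlib sketch of the same function:

    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    // jitter mirrors the shape of wait.Jitter: a duration in
    // [d, d+maxFactor*d), with a non-positive maxFactor treated as 1.0.
    func jitter(d time.Duration, maxFactor float64) time.Duration {
    	if maxFactor <= 0.0 {
    		maxFactor = 1.0
    	}
    	return d + time.Duration(rand.Float64()*maxFactor*float64(d))
    }

    func main() {
    	resync := time.Minute
    	// Each requeue lands somewhere between 1m and 1m30s, not exactly 1m.
    	fmt.Println(jitter(resync, 0.5))
    }
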
  6. cmd/erasure-object.go

    			if err != nil {
    				return err
    			}
    			diskVersions[index] = resp.Sign
    			dataDirs[index] = resp.OldDataDir
    			return nil
    		}, index)
    	}
    
    	// Wait for all renames to finish.
    	errs := g.Wait()
    
    	err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
    	if err != nil {
    		dg := errgroup.WithNErrs(len(disks))
    		for index, nerr := range errs {
    Registered: Sun Jun 16 00:44:34 UTC 2024
    - Last Modified: Mon Jun 10 15:51:27 UTC 2024
    - 78.6K bytes
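
    After g.Wait, reduceWriteQuorumErrs condenses the per-disk error slice into one verdict: enough nil entries means the write met quorum. A hypothetical simplification of that shape:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // reduceQuorumErrs is a stand-in for the excerpt's reduceWriteQuorumErrs:
    // the operation succeeds iff at least quorum of the per-disk calls
    // returned nil.
    func reduceQuorumErrs(errs []error, quorum int) error {
    	ok := 0
    	for _, err := range errs {
    		if err == nil {
    			ok++
    		}
    	}
    	if ok >= quorum {
    		return nil
    	}
    	return fmt.Errorf("write quorum not met: %d/%d disks succeeded", ok, len(errs))
    }

    func main() {
    	errs := []error{nil, nil, errors.New("disk offline"), nil}
    	fmt.Println(reduceQuorumErrs(errs, 3)) // <nil>: 3 of 4 succeeded
    }
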
  7. pkg/volume/csi/csi_attacher_test.go

    	ticker := time.NewTicker(10 * time.Millisecond)
    	var attach *storage.VolumeAttachment
    	var err error
    	defer ticker.Stop()
    	// wait for attachment to be saved
    	for i := 0; i < 100; i++ {
    		attach, err = client.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, metav1.GetOptions{})
    		if err != nil {
    			attach = nil
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Thu Apr 18 12:23:16 UTC 2024
    - 58.1K bytes
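
    The loop is a bounded poll: up to 100 gets, 10ms apart, until the VolumeAttachment appears. The same shape as a reusable helper; the names are hypothetical:

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    // pollUntil calls get up to attempts times, sleeping interval between
    // tries, and returns nil as soon as get reports done.
    func pollUntil(attempts int, interval time.Duration, get func() (bool, error)) error {
    	var lastErr error
    	for i := 0; i < attempts; i++ {
    		done, err := get()
    		if err == nil && done {
    			return nil
    		}
    		lastErr = err
    		time.Sleep(interval)
    	}
    	return errors.Join(errors.New("timed out waiting for condition"), lastErr)
    }

    func main() {
    	start := time.Now()
    	err := pollUntil(100, 10*time.Millisecond, func() (bool, error) {
    		return time.Since(start) > 50*time.Millisecond, nil // "saved" after ~50ms
    	})
    	fmt.Println(err) // <nil>
    }
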
  8. cmd/batch-handlers.go

    				// persist in-memory state to disk after every 10secs.
    				batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
    
    				if wait := globalBatchConfig.ReplicationWait(); wait > 0 {
    					time.Sleep(wait)
    				}
    			}()
    		}
    		wk.Wait()
    
    		ri.RetryAttempts = attempts
    		ri.Complete = ri.ObjectsFailed == 0
    		ri.Failed = ri.ObjectsFailed > 0
    
    		globalBatchJobsMetrics.save(job.ID, ri)
    Registered: Sun Jun 16 00:44:34 UTC 2024
    - Last Modified: Tue Jun 11 03:13:30 UTC 2024
    - 56K bytes
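
    wk is a worker pool whose Wait blocks until every queued replication goroutine has returned, with an optional time.Sleep throttling successive objects. The same barrier with a plain sync.WaitGroup; process and the wait value are illustrative:

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    func process(obj string) { fmt.Println("replicated", obj) }

    func main() {
    	objects := []string{"a.txt", "b.txt", "c.txt"}
    	replicationWait := 10 * time.Millisecond // optional throttle

    	var wg sync.WaitGroup
    	for _, obj := range objects {
    		wg.Add(1)
    		go func(obj string) {
    			defer wg.Done()
    			process(obj)
    			if replicationWait > 0 {
    				time.Sleep(replicationWait)
    			}
    		}(obj)
    	}
    	wg.Wait() // the barrier: continue only after every worker finishes
    	fmt.Println("batch complete")
    }
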
  9. staging/src/k8s.io/apiserver/pkg/storage/testing/watcher_tests.go

    	})
    	if err != nil {
    		t.Fatal(err)
    	}
    
    	select {
    	case _, ok := <-w.ResultChan():
    		if ok {
    			t.Error("ResultChan() should be closed")
    		}
    	case <-time.After(wait.ForeverTestTimeout):
    		t.Errorf("timeout after %v", wait.ForeverTestTimeout)
    	}
    }
    
    func RunTestWatcherTimeout(ctx context.Context, t *testing.T, store storage.Interface) {
    	// initialRV is used to initiate the watcher at the beginning of the world.
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed Apr 24 18:25:29 UTC 2024
    - 63.8K bytes
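
    The assertion leans on a receive from a closed channel returning immediately with ok == false; only a still-open, empty channel would leave the select hanging until the timeout fires. In miniature:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	ch := make(chan int)
    	close(ch)

    	select {
    	case v, ok := <-ch:
    		// A closed channel yields its zero value with ok == false, at once.
    		fmt.Println(v, ok) // 0 false
    	case <-time.After(time.Second):
    		fmt.Println("timeout: channel neither closed nor sending")
    	}
    }
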
  10. hack/local-up-cluster.sh

      kube::util::wait_for_success "$coredns_wait_time" "$interval_time" "$coredns_pods_ready"
      if [ $? == "1" ]; then
        echo "time out on waiting for coredns pods"
        exit 1
      fi
    
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Sat May 25 02:33:52 UTC 2024
    - 53.3K bytes