Results 41 - 50 of 1,359 for measure (0.2 sec)

  1. src/runtime/metrics/doc.go

    		Estimated total CPU time spent performing GC tasks on spare CPU
    		resources that the Go scheduler could not otherwise find a use
    		for. This should be subtracted from the total GC CPU time to
    		obtain a measure of compulsory GC CPU time. This metric is an
    		overestimate, and not directly comparable to system CPU time
    		measurements. Compare only with other /cpu/classes metrics.
    
    	/cpu/classes/gc/pause:cpu-seconds
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 22:58:43 UTC 2024
    - 20K bytes
    - Viewed (0)
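    A minimal sketch of how the derived quantity described above could be computed at runtime, assuming the excerpt is the doc text for /cpu/classes/gc/mark/idle:cpu-seconds (the metric name is inferred, not shown in the excerpt): read the total and idle-priority GC CPU metrics with runtime/metrics and subtract.

    package main

    import (
    	"fmt"
    	"runtime/metrics"
    )

    func main() {
    	// Sample total GC CPU time and idle-priority GC CPU time.
    	samples := []metrics.Sample{
    		{Name: "/cpu/classes/gc/total:cpu-seconds"},
    		{Name: "/cpu/classes/gc/mark/idle:cpu-seconds"}, // assumed metric name
    	}
    	metrics.Read(samples)

    	total := samples[0].Value.Float64()
    	idle := samples[1].Value.Float64()

    	// Per the doc comment: subtracting the idle-priority time from the
    	// total gives an estimate of compulsory GC CPU time. Compare only
    	// with other /cpu/classes metrics.
    	fmt.Printf("compulsory GC CPU time: %.3fs\n", total-idle)
    }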
  2. src/testing/benchmark.go

    			n = min(n, 1e9)
    			b.runN(int(n))
    		}
    	}
    	b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes, b.extra}
    }
    
    // Elapsed returns the measured elapsed time of the benchmark.
    // The duration reported by Elapsed matches the one measured by
    // [B.StartTimer], [B.StopTimer], and [B.ResetTimer].
    func (b *B) Elapsed() time.Duration {
    	d := b.duration
    	if b.timerOn {
    		d += highPrecisionTimeSince(b.start)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 01:00:11 UTC 2024
    - 23.9K bytes
    - Viewed (0)
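    A hedged example of how Elapsed pairs with the timer controls named in the doc comment; the benchmark name, workload, and reported metric are illustrative, not taken from the file above.

    package example

    import (
    	"sort"
    	"testing"
    )

    // BenchmarkSortWithSetup: work done while the timer is stopped is
    // excluded from the duration Elapsed reports; only the sort is measured.
    func BenchmarkSortWithSetup(b *testing.B) {
    	for i := 0; i < b.N; i++ {
    		b.StopTimer()
    		data := make([]int, 1<<10) // setup, not measured
    		for j := range data {
    			data[j] = len(data) - j
    		}
    		b.StartTimer()
    		sort.Ints(data) // measured work
    	}
    	// Elapsed reflects only the time accumulated while the timer ran.
    	b.ReportMetric(float64(b.Elapsed().Nanoseconds())/float64(b.N), "elapsed-ns/op")
    }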
  3. src/math/rand/v2/rand_test.go

    		for i := range p {
    			p[i] = i
    		}
    		r.Shuffle(30, func(i, j int) { p[i], p[j] = p[j], p[i] })
    		t += p[0]
    	}
    	Sink = uint64(t)
    }
    
    // BenchmarkShuffleOverhead uses a minimal swap function
    // to measure just the shuffling overhead.
    func BenchmarkShuffleOverhead(b *testing.B) {
    	r := testRand()
    	for n := b.N; n > 0; n-- {
    		r.Shuffle(30, func(i, j int) {
    			if i < 0 || i >= 30 || j < 0 || j >= 30 {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 18:42:28 UTC 2024
    - 17.4K bytes
    - Viewed (0)
  4. docs/en/docs/deployment/concepts.md

    You could pick an **arbitrary number** to target, for example, something **between 50% and 90%** of resource utilization. The point is that those are probably the main things you will want to measure and use to tweak your deployments.
    
    Registered: Mon Jun 17 08:32:26 UTC 2024
    - Last Modified: Thu May 02 22:37:31 UTC 2024
    - 18K bytes
    - Viewed (0)
  5. src/cmd/vendor/golang.org/x/text/cases/map.go

    		// ::Any-Upper();
    		// ::NFC();
    
    		// TODO: See A.5. A soft-dotted rune never has an exception. This would
    		// allow us to overload the exception bit and encode this property in
    		// info. Need to measure performance impact of this.
    		r, _ := utf8.DecodeRune(c.src[c.pSrc:])
    		oldPDst := c.pDst
    		if !f(c) {
    			return false
    		}
    		if !unicode.Is(unicode.Soft_Dotted, r) {
    			return true
    		}
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Jan 24 13:01:26 UTC 2024
    - 22.7K bytes
    - Viewed (0)
  6. src/compress/gzip/gunzip_test.go

    			"the great task remaining before us — that from these honored\n" +
    			"dead we take increased devotion to that cause for which they\n" +
    			"gave the last full measure of devotion —\n" +
    			"  that we here highly resolve that these dead shall not have\n" +
    			"died in vain — that this nation, under God, shall have a new\n" +
    			"birth of freedom — and that government of the people, by the\n" +
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Jul 12 15:06:07 UTC 2022
    - 19.5K bytes
    - Viewed (0)
  7. okhttp/src/main/kotlin/okhttp3/Cache.kt

     * may cause corruption or runtime errors if not. It may however be shared amongst multiple OkHttpClient
     * instances.
     *
     * ## Cache Optimization
     *
     * To measure cache effectiveness, this class tracks three statistics:
     *
     *  * **[Request Count:][requestCount]** the number of HTTP requests issued since this cache was
     *    created.
    Registered: Sun Jun 16 04:42:17 UTC 2024
    - Last Modified: Wed Apr 10 19:46:48 UTC 2024
    - 26.8K bytes
    - Viewed (0)
  8. src/runtime/metrics_test.go

    	stop := applyGCLoad(b)
    
    	// Spend this much time measuring latencies.
    	latencies := make([]time.Duration, 0, 1024)
    	_, samples := prepareAllMetricsSamples()
    
    	// Hit metrics.Read continuously and measure.
    	b.ResetTimer()
    	for i := 0; i < b.N; i++ {
    		start := time.Now()
    		metrics.Read(samples)
    		latencies = append(latencies, time.Since(start))
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:52:17 UTC 2024
    - 45K bytes
    - Viewed (0)
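    The excerpt collects per-call latencies but is cut off before they are summarized. A sketch of one way such a slice could be reduced to a percentile; the helper name and percentile handling are assumptions, not part of the benchmark above.

    package example

    import (
    	"sort"
    	"time"
    )

    // percentile returns the p-th quantile (0 <= p <= 1) of the collected
    // latencies without modifying the input slice; illustrative only.
    func percentile(latencies []time.Duration, p float64) time.Duration {
    	if len(latencies) == 0 {
    		return 0
    	}
    	sorted := append([]time.Duration(nil), latencies...)
    	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
    	return sorted[int(p*float64(len(sorted)-1))]
    }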
  9. platforms/documentation/docs/src/docs/userguide/optimizing-performance/build-cache/build_cache.adoc

    To ensure that inputs and outputs are properly declared, use integration tests (for example, using TestKit) to check that a task produces the same outputs for identical inputs and captures all output files for the task.
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Wed May 15 11:30:10 UTC 2024
    - 26.1K bytes
    - Viewed (0)
  10. staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go

    func RecordWatchListLatency(ctx context.Context, gvr schema.GroupVersionResource, metricsScope string) {
    	requestReceivedTimestamp, ok := request.ReceivedTimestampFrom(ctx)
    	if !ok {
    		utilruntime.HandleError(fmt.Errorf("unable to measure watchlist latency because no received ts found in the ctx, gvr: %s", gvr))
    		return
    	}
    	elapsedSeconds := time.Since(requestReceivedTimestamp).Seconds()
    
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed Sep 27 07:29:19 UTC 2023
    - 35K bytes
    - Viewed (0)
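    The pattern in RecordWatchListLatency (stash a receipt timestamp early, pull it back out of the context later, and measure elapsed seconds) in a generic, self-contained sketch; the context key and function names here are hypothetical and do not come from the apiserver's request package.

    package example

    import (
    	"context"
    	"fmt"
    	"time"
    )

    type receivedTSKey struct{} // hypothetical context key

    // withReceivedTimestamp records when a request was first seen.
    func withReceivedTimestamp(ctx context.Context, ts time.Time) context.Context {
    	return context.WithValue(ctx, receivedTSKey{}, ts)
    }

    // recordLatency mirrors the shape of the function above: if no timestamp
    // is present, report the problem and return rather than emitting a bogus
    // measurement.
    func recordLatency(ctx context.Context) {
    	ts, ok := ctx.Value(receivedTSKey{}).(time.Time)
    	if !ok {
    		fmt.Println("unable to measure latency: no received timestamp in ctx")
    		return
    	}
    	fmt.Printf("latency: %.3fs\n", time.Since(ts).Seconds())
    }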