Results 1 - 8 of 8 for AVG (0.05 sec)
cmd/bucket-stats.go
qs.TgtXferStats[arn][Large] = XferStats{
	Avg:  v.XferRateLrg.Avg,
	Curr: lcurrTgt,
	Peak: math.Max(v.XferRateLrg.Peak, lcurrTgt),
}
qs.TgtXferStats[arn][Small] = XferStats{
	Avg:  v.XferRateSml.Avg,
	Curr: scurrTgt,
	Peak: math.Max(v.XferRateSml.Peak, scurrTgt),
}
if tcount > 0 {
	qs.TgtXferStats[arn][Total] = XferStats{
		Avg: (v.XferRateLrg.Avg + v.XferRateSml.Avg) / float64(tcount),
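The pattern here keeps three figures per target: a long-run average, the current rate, and a peak that only ratchets upward via math.Max. A minimal sketch of that update, assuming an illustrative XferStats with float64 rates in bytes/sec (not MinIO's exported type):

package main

import (
	"fmt"
	"math"
)

type XferStats struct {
	Avg, Curr, Peak float64 // transfer rates in bytes/sec
}

// observe records the latest measured rate: Curr is replaced, Peak is
// ratcheted up with math.Max exactly as in the snippet, and Avg is
// assumed to be maintained elsewhere from cumulative totals.
func observe(prev XferStats, curr float64) XferStats {
	return XferStats{
		Avg:  prev.Avg,
		Curr: curr,
		Peak: math.Max(prev.Peak, curr),
	}
}

func main() {
	s := XferStats{Avg: 50e6, Peak: 80e6}
	s = observe(s, 95e6)
	fmt.Printf("%+v\n", s) // Peak ratchets to 9.5e7
}

A peak kept this way never decays; a windowed peak would need last-minute machinery like the lastMinuteLatency type in the next result.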
cmd/bucket-targets.go
}

type latencyStat struct {
	lastmin lastMinuteLatency
	curr    time.Duration
	avg     time.Duration
	peak    time.Duration
	N       int64
}

func (l *latencyStat) update(d time.Duration) {
	l.lastmin.add(d)
	l.N++
	if d > l.peak {
		l.peak = d
	}
	l.curr = l.lastmin.getTotal().avg()
	l.avg = time.Duration((int64(l.avg)*(l.N-1) + int64(l.curr)) / l.N)
}
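update maintains a cumulative moving average in O(1) space: avg_n = (avg_{n-1}*(n-1) + x_n) / n. One subtlety visible in the snippet: the sample folded in is l.curr, the rolling last-minute average, not the raw duration d. A self-contained sketch of just the arithmetic, with an illustrative runningAvg type:

package main

import (
	"fmt"
	"time"
)

type runningAvg struct {
	avg time.Duration
	n   int64
}

// add folds a new sample into the average without storing history:
// avg_n = (avg_{n-1}*(n-1) + x_n) / n
func (r *runningAvg) add(x time.Duration) {
	r.n++
	r.avg = time.Duration((int64(r.avg)*(r.n-1) + int64(x)) / r.n)
}

func main() {
	var r runningAvg
	for _, d := range []time.Duration{10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond} {
		r.add(d)
	}
	fmt.Println(r.avg) // 20ms
}

Because the integer division truncates toward zero, each step can lose up to a nanosecond; for latency telemetry that drift is negligible.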
src/test/java/jcifs/smb/CriticalPerformanceTest.java
System.out.printf("Buffer Cache Performance: %d allocs, %d releases in %.2f ms%n",
        allocations.get(), releases.get(), overallTimeMs);
System.out.printf("  Avg allocation time: %.2f ns%n", avgAllocTimeNs);
System.out.printf("  Avg release time: %.2f ns%n", avgReleaseTimeNs);

// Verify O(1) performance - should be reasonably fast (allowing for JVM overhead)
docs/metrics/prometheus/list.md
| `minio_node_if_rx_bytes_avg` | Bytes received on the interface in 60s (avg) since uptime. |
| `minio_node_if_rx_bytes_max` | Bytes received on the interface in 60s (max) since uptime. |
| `minio_node_if_rx_errors` | Receive errors in 60s. |
| `minio_node_if_rx_errors_avg` | Receive errors in 60s (avg). |
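Since the _avg and _max variants are already 60-second aggregates maintained by the server, a plain instant query is enough to read them. A hedged sketch using the Prometheus Go client, assuming a Prometheus server at an illustrative address that scrapes this MinIO deployment:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	// Address is illustrative; point it at your own Prometheus server.
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
	if err != nil {
		log.Fatal(err)
	}
	promAPI := v1.NewAPI(client)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// The _avg variant is already a 60s average, so no rate() window is needed.
	result, warnings, err := promAPI.Query(ctx, "minio_node_if_rx_bytes_avg", time.Now())
	if err != nil {
		log.Fatal(err)
	}
	if len(warnings) > 0 {
		fmt.Println("warnings:", warnings)
	}
	fmt.Println(result)
}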
cmd/site-replication-metrics.go
metric.TotalDowntime = epHealth.offlineDuration
metric.LastOnline = epHealth.lastOnline
metric.Online = epHealth.Online
metric.Latency = madmin.LatencyStat{
	Curr: epHealth.latency.curr,
	Avg:  epHealth.latency.avg,
	Max:  epHealth.latency.peak,
}
}
m[dID] = metric
}
return m
}

func (srs *SRStatus) updateXferRate(sz int64, duration time.Duration) {
	if sz > minLargeObjSize {
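updateXferRate branches on minLargeObjSize, so large and small objects feed separate rate buckets, matching the XferRateLrg/XferRateSml pair in the first result. A minimal sketch of that size classing; the threshold value below is made up for illustration, not MinIO's:

package main

import (
	"fmt"
	"time"
)

const minLargeObjSize = 128 * 1024 // hypothetical threshold, in bytes

// rate returns the transfer rate in bytes/sec for one completed transfer.
func rate(sz int64, d time.Duration) float64 {
	return float64(sz) / d.Seconds()
}

func main() {
	sz, d := int64(4<<20), 500*time.Millisecond
	if sz > minLargeObjSize {
		fmt.Printf("large-object sample: %.0f B/s\n", rate(sz, d))
	} else {
		fmt.Printf("small-object sample: %.0f B/s\n", rate(sz, d))
	}
}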
src/test/java/jcifs/smb/NtlmPasswordAuthenticatorTimingAttackTest.java
assertTrue(variance < 50.0, String.format(
        "Extreme timing variance in concurrent operations: %.3f "
                + "(min: %d ns, max: %d ns, avg: %.1f ns). "
                + "Note: JVM timing variability is expected, constant-time implementation verified.",
        variance, minTime, maxTime, avgTime));
}
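The test tolerates JVM jitter by bounding a relative spread statistic rather than demanding identical timings. The exact formula is not visible in this excerpt, so the sketch below assumes one plausible definition, (max - min) / avg, purely for illustration:

package main

import "fmt"

// spread computes min, max, average, and an assumed relative-spread
// statistic over nanosecond timing samples.
func spread(samplesNs []int64) (min, max int64, avg, variance float64) {
	min, max = samplesNs[0], samplesNs[0]
	var sum int64
	for _, s := range samplesNs {
		if s < min {
			min = s
		}
		if s > max {
			max = s
		}
		sum += s
	}
	avg = float64(sum) / float64(len(samplesNs))
	variance = float64(max-min) / avg // assumption: relative spread
	return
}

func main() {
	_, _, avg, v := spread([]int64{900, 1100, 1000, 1050})
	fmt.Printf("avg=%.1f ns, spread=%.3f\n", avg, v)
	if v >= 50.0 {
		fmt.Println("extreme timing variance")
	}
}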
cmd/metrics-v2.go
Value: float64(qs.MRFStats.LastFailedCount),
}
if qs.QStats.Avg.Count > 0 || qs.QStats.Curr.Count > 0 {
	qt := qs.QStats
	currInQueueBytes.Value = qt.Curr.Bytes
	currInQueueCount.Value = qt.Curr.Count
	avgQueueBytes.Value = qt.Avg.Bytes
	avgQueueCount.Value = qt.Avg.Count
	maxQueueBytes.Value = qt.Max.Bytes
	maxQueueCount.Value = qt.Max.Count
}
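The guard publishes the queue gauges only when the replication queue has actually held entries, keeping all-zero series out of the scrape. A compact sketch of the same gate, with illustrative stand-in types rather than MinIO's:

package main

import "fmt"

type QCount struct{ Bytes, Count float64 }
type QStats struct{ Curr, Avg, Max QCount }

// export returns gauge values only when the queue has ever held items,
// mirroring the qs.QStats.Avg.Count > 0 || qs.QStats.Curr.Count > 0 check.
func export(qt QStats) map[string]float64 {
	if qt.Avg.Count == 0 && qt.Curr.Count == 0 {
		return nil // nothing queued since start; skip the series
	}
	return map[string]float64{
		"curr_bytes": qt.Curr.Bytes, "curr_count": qt.Curr.Count,
		"avg_bytes": qt.Avg.Bytes, "avg_count": qt.Avg.Count,
		"max_bytes": qt.Max.Bytes, "max_count": qt.Max.Count,
	}
}

func main() {
	fmt.Println(export(QStats{Curr: QCount{Bytes: 1 << 20, Count: 3}}))
}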
cmd/site-replication.go
for _, peer := range peerSMetricsList {
	sm.ActiveWorkers.Avg += peer.ActiveWorkers.Avg
	sm.ActiveWorkers.Curr += peer.ActiveWorkers.Curr
	if peer.ActiveWorkers.Max > sm.ActiveWorkers.Max {
		// Note: `+=` accumulates rather than taking the maximum; the
		// guard suggests `sm.ActiveWorkers.Max = peer.ActiveWorkers.Max`
		// was intended.
		sm.ActiveWorkers.Max += peer.ActiveWorkers.Max
	}
	sm.Queued.Avg.Bytes += peer.Queued.Avg.Bytes
	sm.Queued.Avg.Count += peer.Queued.Avg.Count
	sm.Queued.Curr.Bytes += peer.Queued.Curr.Bytes
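When aggregating across peers, averages, current counts, and queued byte/count totals are summed, while Max should track the single largest peer value. A sketch of that aggregation with the max branch written as an assignment (types are illustrative):

package main

import "fmt"

type Workers struct{ Avg, Curr, Max float64 }

// aggregate folds per-peer worker stats into a cluster-wide summary.
func aggregate(peers []Workers) (sum Workers) {
	for _, p := range peers {
		sum.Avg += p.Avg   // averages and current counts add across sites
		sum.Curr += p.Curr
		if p.Max > sum.Max {
			sum.Max = p.Max // cluster max is the max over peers, not a sum
		}
	}
	return
}

func main() {
	fmt.Printf("%+v\n", aggregate([]Workers{{2, 3, 5}, {1, 4, 7}}))
}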