Results 111 - 120 of 421 for driver (0.03 sec)
- cmd/metrics-v3-cluster-erasure-set.go
"Write quorum for the erasure set in a pool", poolIDL, setIDL) erasureSetOnlineDrivesCountMD = NewGaugeMD(erasureSetOnlineDrivesCount, "Count of online drives in the erasure set in a pool", poolIDL, setIDL) erasureSetHealingDrivesCountMD = NewGaugeMD(erasureSetHealingDrivesCount, "Count of healing drives in the erasure set in a pool", poolIDL, setIDL) erasureSetHealthMD = NewGaugeMD(erasureSetHealth,
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Tue May 14 07:25:56 UTC 2024 - 4.4K bytes - Viewed (0)
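These descriptors attach pool and erasure-set labels to each drive gauge. As a rough sketch of the same labelling idea outside MinIO's own metrics framework (the metric name, label values, and topology below are illustrative, not taken from the source), a Prometheus GaugeVec keyed by pool_id and set_id could be exposed like this:

    package main

    import (
        "log"
        "net/http"
        "strconv"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    // onlineDrives mirrors the idea of erasureSetOnlineDrivesCount: one gauge
    // per (pool, erasure set), distinguished by labels rather than metric names.
    var onlineDrives = prometheus.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: "minio",
        Subsystem: "cluster_erasure_set",
        Name:      "online_drives_count",
        Help:      "Count of online drives in the erasure set in a pool",
    }, []string{"pool_id", "set_id"})

    func main() {
        prometheus.MustRegister(onlineDrives)

        // Hypothetical topology: 1 pool, 2 erasure sets, 16 drives online in each.
        for set := 0; set < 2; set++ {
            onlineDrives.WithLabelValues("0", strconv.Itoa(set)).Set(16)
        }

        http.Handle("/metrics", promhttp.Handler())
        log.Fatal(http.ListenAndServe(":2112", nil))
    }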
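- docs/distributed/DESIGN.md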
- We limited the number of drives per erasure set to 16 because erasure coding across more than 16 shards becomes chatty and offers no performance advantage. Additionally, a 16-drive erasure set gives you a tolerance of 8 drives per object by default, which is plenty for any practical scenario.
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Wed Feb 26 09:25:50 UTC 2025 - 8K bytes - Viewed (1)
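To make the tolerance arithmetic in that excerpt concrete: with parity at half the set size, a 16-drive set splits each object into 8 data and 8 parity shards and survives up to 8 lost drives. A minimal sketch of that calculation, assuming the setSize/2 default described in the excerpt (not lifted from MinIO's code):

    package main

    import "fmt"

    // tolerance reports the data/parity split for an erasure set of the given
    // size when parity is half the set size, as described in DESIGN.md above.
    // The set then tolerates up to `parity` failed drives per object.
    func tolerance(setSize int) (data, parity int) {
        parity = setSize / 2
        data = setSize - parity
        return data, parity
    }

    func main() {
        for _, setSize := range []int{4, 8, 12, 16} {
            data, parity := tolerance(setSize)
            fmt.Printf("set size %2d -> %d data + %d parity shards, tolerates %d failed drives\n",
                setSize, data, parity, parity)
        }
    }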
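- cmd/endpoint-ellipses.go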
    msg := fmt.Sprintf("Incorrect number of endpoints provided %s, number of drives %d is not divisible by any supported erasure set sizes %d",
        args, commonSize, setSizes)
    return nil, config.ErrInvalidNumberOfErasureEndpoints(nil).Msg(msg)
    }

    var setSize uint64
    // Custom set drive count allows to override automatic distribution.
    // only meant if you want to further optimize drive distribution.
    if setDriveCount > 0 {
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 14.6K bytes - Viewed (0)
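The error path in this snippet fires when the drive count is not divisible by any supported erasure set size. A minimal sketch of that divisibility check, assuming supported set sizes of 2 through 16 per the DESIGN.md excerpt above; the real selection logic in endpoint-ellipses.go applies further constraints that this ignores:

    package main

    import "fmt"

    // possibleSetSizes returns every candidate erasure set size that divides
    // the total drive count evenly. Assumes supported sizes 2..16, per the
    // 16-drive limit in DESIGN.md; MinIO's actual code narrows this further.
    func possibleSetSizes(totalDrives int) []int {
        var sizes []int
        for size := 2; size <= 16; size++ {
            if totalDrives%size == 0 {
                sizes = append(sizes, size)
            }
        }
        return sizes
    }

    func main() {
        for _, drives := range []int{16, 30, 64, 17} {
            sizes := possibleSetSizes(drives)
            if len(sizes) == 0 {
                fmt.Printf("%d drives: not divisible by any supported erasure set size\n", drives)
                continue
            }
            fmt.Printf("%d drives: candidate set sizes %v\n", drives, sizes)
        }
    }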
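- cmd/globals.go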
    // The global subnet config
    globalSubnetConfig subnet.Config
    // The global callhome config
    globalCallhomeConfig callhome.Config
    // The global drive config
    globalDriveConfig drive.Config
    // Global server's network statistics
    globalConnStats = newConnStats()
    // Global HTTP request statistics
    globalHTTPStats = newHTTPStats()
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Tue Sep 03 18:23:41 UTC 2024 - 16.2K bytes - Viewed (1)
- cmd/metrics-v2.go
        Subsystem: driveSubsystem,
        Name:      onlineTotal,
        Help:      "Total drives online in this cluster",
        Type:      gaugeMetric,
      }
    }

    func getClusterDrivesTotalMD() MetricDescription {
      return MetricDescription{
        Namespace: clusterMetricNamespace,
        Subsystem: driveSubsystem,
        Name:      total,
        Help:      "Total drives in this cluster",
        Type:      gaugeMetric,
      }
    }
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 133.4K bytes - Viewed (0)
- src/archive/tar/writer.go
"path" "slices" "strings" "time" ) // Writer provides sequential writing of a tar archive. // [Writer.WriteHeader] begins a new file with the provided [Header], // and then Writer can be treated as an io.Writer to supply that file's data. type Writer struct { w io.Writer pad int64 // Amount of padding to write after current file entry curr fileWriter // Writer for current file entry
Registered: Tue Sep 09 11:13:09 UTC 2025 - Last Modified: Mon Feb 03 16:38:43 UTC 2025 - 19.7K bytes - Viewed (0)
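The doc comment describes the whole contract: WriteHeader starts an entry, then the Writer accepts that entry's bytes like any io.Writer. A small self-contained example of that pattern, writing one in-memory file into a tar stream:

    package main

    import (
        "archive/tar"
        "bytes"
        "fmt"
        "log"
    )

    func main() {
        var buf bytes.Buffer
        tw := tar.NewWriter(&buf)

        body := []byte("hello from archive/tar\n")

        // WriteHeader begins a new file entry; the Writer then accepts that
        // file's data until the next WriteHeader or Close.
        hdr := &tar.Header{
            Name: "hello.txt",
            Mode: 0o644,
            Size: int64(len(body)),
        }
        if err := tw.WriteHeader(hdr); err != nil {
            log.Fatal(err)
        }
        if _, err := tw.Write(body); err != nil {
            log.Fatal(err)
        }

        // Close flushes padding for the current entry and writes the archive footer.
        if err := tw.Close(); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("archive is %d bytes\n", buf.Len())
    }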
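- docs/config/README.md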
In most setups this is sufficient to heal the content after drive replacements. Setting `max_sleep` to a *lower* value and setting `max_io` to a *higher* value would make heal go faster.
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Tue Aug 12 18:20:36 UTC 2025 - 18.1K bytes - Viewed (1)
- cmd/erasure-healing-common_test.go
        errs, fi, false, bucket, object, madmin.HealDeepScan)
    for diskIndex, disk := range erasureDisks {
        if diskIndex == 0 && disk != nil {
            t.Errorf("Drive not filtered as expected, drive: %d", diskIndex)
        }
        if diskIndex != 0 && disk == nil {
            t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
        }
    }
    partsMetadata[0] = partsMetadataBackup // Revert before going to the next test
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 22.4K bytes - Viewed (0)
- cmd/admin-heal-ops.go
        if serverDebugLog {
            fmt.Printf("Task in the queue: %#v\n", task)
        }
    case <-h.ctx.Done():
        return nil
    }

    countOKDrives := func(drives []madmin.HealDriveInfo) (count int) {
        for _, drive := range drives {
            if drive.State == madmin.DriveStateOk {
                count++
            }
        }
        return count
    }

    // task queued, now wait for the response.
    select {
    case res := <-task.respCh:
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 25.4K bytes - Viewed (0)
- cmd/metrics.go
"Total number of offline drives in current MinIO server instance", nil, nil), prometheus.GaugeValue, float64(offlineDisks.Sum()), ) // MinIO Total Disks per node ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(minioNamespace, "drives", "total"), "Total number of drives for current MinIO server instance", nil, nil),
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Thu Aug 15 12:04:40 UTC 2024 - 16.6K bytes - Viewed (0)
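cmd/metrics.go builds these drive gauges with the const-metric pattern: a prometheus.Desc created via BuildFQName, then MustNewConstMetric emitted from a collector's Collect method. A stripped-down sketch of that pattern with a hypothetical collector and made-up drive counts (only the minio/drives naming follows the snippet; MinIO derives the real values from its storage info):

    package main

    import (
        "log"
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    // driveCollector emits point-in-time drive counts using the same
    // MustNewConstMetric pattern as the snippet from cmd/metrics.go.
    type driveCollector struct {
        total, offline float64 // illustrative values for the example
    }

    var (
        totalDesc = prometheus.NewDesc(
            prometheus.BuildFQName("minio", "drives", "total"),
            "Total number of drives for current MinIO server instance", nil, nil)
        offlineDesc = prometheus.NewDesc(
            prometheus.BuildFQName("minio", "drives", "offline_total"),
            "Total number of offline drives in current MinIO server instance", nil, nil)
    )

    func (c driveCollector) Describe(ch chan<- *prometheus.Desc) {
        ch <- totalDesc
        ch <- offlineDesc
    }

    func (c driveCollector) Collect(ch chan<- prometheus.Metric) {
        // Const metrics carry a snapshot value; nothing is cached between scrapes.
        ch <- prometheus.MustNewConstMetric(totalDesc, prometheus.GaugeValue, c.total)
        ch <- prometheus.MustNewConstMetric(offlineDesc, prometheus.GaugeValue, c.offline)
    }

    func main() {
        prometheus.MustRegister(driveCollector{total: 16, offline: 1})
        http.Handle("/metrics", promhttp.Handler())
        log.Fatal(http.ListenAndServe(":2112", nil))
    }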