Results 41 - 50 of 388 for drives (0.08 sec)
cmd/erasure-server-pool.go
    if errors.Is(err, errNoHealRequired) {
        countNoHeal++
    }
    r.DiskCount += result.DiskCount
    r.SetCount += result.SetCount
    r.Before.Drives = append(r.Before.Drives, result.Before.Drives...)
    r.After.Drives = append(r.After.Drives, result.After.Drives...)
}

// No heal returned by all serverPools, return errNoHealRequired
if countNoHeal == len(z.serverPools) {
    return r, errNoHealRequired
}
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 89.1K bytes
cmd/testdata/config/invalid-disks.yaml
version: v1
address: ':9000'
console-address: ':9001'
certs-dir: '/home/user/.minio/certs/'
pools: # Specify the nodes and drives with pools
  - - 'https://server-example-pool1:9000/mnt/disk1/'
    - 'https://server1-pool1:9000/mnt/disk{1...4}/'
    - 'https://server3-pool1:9000/mnt/disk{1...4}/'
    - 'https://server4-pool1:9000/mnt/disk{1...4}/'
  - - 'https://server-example-pool2:9000/mnt/disk{1...4}/'
Last Modified: Thu Dec 07 09:33:56 UTC 2023 - 863 bytes
cmd/admin-handlers.go
        anonNetwork[anonEndpoint] = status
    }
    return anonNetwork
}

anonymizeDrives := func(drives []madmin.Disk) []madmin.Disk {
    anonDrives := []madmin.Disk{}
    for _, drive := range drives {
        drive.Endpoint = anonAddr(drive.Endpoint)
        anonDrives = append(anonDrives, drive)
    }
    return anonDrives
}

go func() {
    defer xioutil.SafeClose(healthInfoCh)
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 99.6K bytes
docs/config/README.md
In most setups this is sufficient to heal the content after drive replacements. Setting `max_sleep` to a *lower* value and setting `max_io` to a *higher* value would make heal go faster.
Last Modified: Tue Aug 12 18:20:36 UTC 2025 - 18.1K bytes
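The docs/config/README.md excerpt above refers to the `heal` configuration subsystem's `max_sleep` and `max_io` keys. As a minimal sketch of how these would be tuned (assuming the standard `mc admin config` interface and a hypothetical alias `myminio`; the values are purely illustrative, not recommendations):

    # Speed up healing: shorter sleep between objects, more IO requests allowed.
    # Key names come from the excerpt above; the values shown are illustrative.
    mc admin config set myminio/ heal max_sleep=250ms max_io=25

    # Read back the currently active heal settings to confirm the change.
    mc admin config get myminio/ heal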
buildscripts/verify-healing.sh
echo "my content" | /tmp/mc pipe myminio/testbucket/file-$i done } function perform_test() { start_port=$2 start_minio_3_node $start_port echo "Testing Distributed Erasure setup healing of drives" echo "Remove the contents of the disks belonging to '${1}' node" rm -rf ${WORK_DIR}/${1}/*/ set -x start_minio_3_node $start_port check_heal ${1} rv=$? if [ "$rv" == "1" ]; then
Last Modified: Fri Jul 12 20:51:54 UTC 2024 - 4K bytes
docs/metrics/prometheus/grafana/minio-dashboard.json
"hide": false, "instant": true, "legendFormat": ".", "range": false, "refId": "B" } ], "title": "Total Online/Offline Drives", "type": "gauge" }, { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": {
Last Modified: Mon Aug 04 01:46:49 UTC 2025 - 93.1K bytes
cmd/erasure-metadata-utils.go
shuffledPartsMetadata = make([]FileInfo, len(disks))
distribution := fi.Erasure.Distribution

var inconsistent int
for i, meta := range metaArr {
    if disks[i] == nil {
        // Assuming offline drives as inconsistent,
        // to be safe and fallback to original
        // distribution order.
        inconsistent++
        continue
    }
    if !meta.IsValid() {
        inconsistent++
        continue
    }
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 11.7K bytes
cmd/erasure-sets_test.go
storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "")
if err != nil {
    t.Fatalf("Unable to format drives for erasure, %s", err)
}

ep := PoolEndpoints{Endpoints: endpoints}

parity, err := ecDrivesNoConfig(16)
if err != nil {
    t.Fatalf("Unexpected error during EC drive config: %v", err)
}
if _, err := newErasureSets(ctx, ep, storageDisks, format, parity, 0); err != nil {
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 6.8K bytes
docs/debugging/README.md
### Remotely Inspecting backend data
Last Modified: Tue Aug 12 18:20:36 UTC 2025 - 8.6K bytes
cmd/erasure-healing-common_test.go
    errs, fi, false, bucket, object, madmin.HealDeepScan)

for diskIndex, disk := range erasureDisks {
    if diskIndex == 0 && disk != nil {
        t.Errorf("Drive not filtered as expected, drive: %d", diskIndex)
    }
    if diskIndex != 0 && disk == nil {
        t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
    }
}

partsMetadata[0] = partsMetadataBackup // Revert before going to the next test
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 22.4K bytes