Results 51 - 60 of 335 for Drives (0.11 sec)
cmd/admin-handlers.go
            anonNetwork[anonEndpoint] = status
        }
        return anonNetwork
    }
    anonymizeDrives := func(drives []madmin.Disk) []madmin.Disk {
        anonDrives := []madmin.Disk{}
        for _, drive := range drives {
            drive.Endpoint = anonAddr(drive.Endpoint)
            anonDrives = append(anonDrives, drive)
        }
        return anonDrives
    }
    go func() {
        defer xioutil.SafeClose(healthInfoCh)
Last Modified: Fri Oct 04 11:32:32 UTC 2024 - 99.7K bytes
cmd/erasure-server-pool.go
        if errors.Is(err, errNoHealRequired) {
            countNoHeal++
        }
        r.DiskCount += result.DiskCount
        r.SetCount += result.SetCount
        r.Before.Drives = append(r.Before.Drives, result.Before.Drives...)
        r.After.Drives = append(r.After.Drives, result.After.Drives...)
    }
    // No heal returned by all serverPools, return errNoHealRequired
    if countNoHeal == len(z.serverPools) {
        return r, errNoHealRequired
    }
Last Modified: Sun Sep 29 22:40:36 UTC 2024 - 89.8K bytes
docs/config/README.md
In most setups this is sufficient to heal the content after drive replacements. Setting `max_sleep` to a *lower* value and setting `max_io` to a *higher* value would make heal go faster.
Last Modified: Fri Aug 16 08:43:49 UTC 2024 - 17.9K bytes
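As a hedged illustration of that tuning advice, the two keys can be adjusted with MinIO's `mc admin config set`; the alias `myminio/` and the specific values below are placeholders, not taken from the README excerpt:

```sh
# make healing more aggressive: sleep less between heal operations, allow more concurrent IO
# values are illustrative; check current settings first with: mc admin config get myminio/ heal
mc admin config set myminio/ heal max_sleep=1ms max_io=100
```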
buildscripts/verify-healing.sh
echo "my content" | /tmp/mc pipe myminio/testbucket/file-$i done } function perform_test() { start_port=$2 start_minio_3_node $start_port echo "Testing Distributed Erasure setup healing of drives" echo "Remove the contents of the disks belonging to '${1}' node" rm -rf ${WORK_DIR}/${1}/*/ set -x start_minio_3_node $start_port check_heal ${1} rv=$? if [ "$rv" == "1" ]; then
Last Modified: Fri Jul 12 20:51:54 UTC 2024 - 4K bytes
cmd/admin-heal-ops.go
        if serverDebugLog {
            fmt.Printf("Task in the queue: %#v\n", task)
        }
    case <-h.ctx.Done():
        return nil
    }
    countOKDrives := func(drives []madmin.HealDriveInfo) (count int) {
        for _, drive := range drives {
            if drive.State == madmin.DriveStateOk {
                count++
            }
        }
        return count
    }
    // task queued, now wait for the response.
    select {
    case res := <-task.respCh:
Last Modified: Sat Oct 26 09:58:27 UTC 2024 - 25.6K bytes
cmd/erasure-multipart.go
        if disk == nil || !disk.IsOnline() {
            parityDrives++
            offlineDrives++
            continue
        }
    }
    if offlineDrives >= (len(onlineDisks)+1)/2 {
        // if offline drives are more than 50% of the drives
        // we have no quorum, we shouldn't proceed just
        // fail at that point.
        return nil, toObjectErr(errErasureWriteQuorum, bucket, object)
    }
    if parityDrives >= len(onlineDisks)/2 {
Last Modified: Sun Sep 29 22:40:36 UTC 2024 - 44.7K bytes
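As a worked example of the quorum check in that excerpt: with len(onlineDisks) = 16, the request is rejected with errErasureWriteQuorum once offlineDrives reaches (16+1)/2 = 8, i.e. as soon as half of the drives are offline.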
docs/debugging/README.md
### Remotely Inspecting backend data
Last Modified: Fri Feb 25 01:17:53 UTC 2022 - 8.7K bytes
cmd/erasure-healing_test.go
    if hr.Before.Drives[0].State != madmin.DriveStateMissing {
        t.Fatalf("Unexpected drive state: %v", hr.Before.Drives[0].State)
    }
    // Check the state of all other disks (should be ok)
    for i, h := range append(hr.Before.Drives[1:], hr.After.Drives...) {
        if h.State != madmin.DriveStateOk {
            t.Fatalf("Unexpected drive state (%d): %v", i+1, h.State)
        }
    }
Last Modified: Mon Jun 10 15:51:27 UTC 2024 - 49K bytes
cmd/metrics-v2.go
            Subsystem: driveSubsystem,
            Name:      onlineTotal,
            Help:      "Total drives online in this cluster",
            Type:      gaugeMetric,
        }
    }
    func getClusterDrivesTotalMD() MetricDescription {
        return MetricDescription{
            Namespace: clusterMetricNamespace,
            Subsystem: driveSubsystem,
            Name:      total,
            Help:      "Total drives in this cluster",
            Type:      gaugeMetric,
        }
    }
Last Modified: Thu Aug 15 12:04:40 UTC 2024 - 131.9K bytes
cmd/testdata/xl-meta-merge.zip
deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.

### Homebrew (recommended)

Run the following command to install the latest stable MinIO package using [Homebrew](https://brew.sh/). Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.

```sh
brew install minio/stable/minio...
```
Last Modified: Fri Mar 08 17:50:48 UTC 2024 - 30.2K bytes
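A minimal sketch of the install-and-run flow that excerpt describes; the paths and drive count below are placeholders, and `{1...4}` is MinIO's ellipsis notation for expanding multiple drive paths:

```sh
# install the latest stable MinIO server via Homebrew
brew install minio/stable/minio

# single-path test setup: replace /data with the directory MinIO should use
# minio server /data

# erasure-coded setup: a minimum of 4 drives per server, using MinIO's {x...y} expansion
minio server /mnt/drive{1...4}
```

The commented single-path invocation is only for quick local testing; the erasure-coded layout from the excerpt requires the multi-drive form.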