Results 1 - 10 of 323 for disks (0.02 sec)
cmd/testdata/config/invalid-disks.yaml
- - 'https://server-example-pool1:9000/mnt/disk1/'
  - 'https://server1-pool1:9000/mnt/disk{1...4}/'
  - 'https://server3-pool1:9000/mnt/disk{1...4}/'
  - 'https://server4-pool1:9000/mnt/disk{1...4}/'
- - 'https://server-example-pool2:9000/mnt/disk{1...4}/'
  - 'https://server1-pool2:9000/mnt/disk{1...4}/'
  - 'https://server3-pool2:9000/mnt/disk{1...4}/'
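The disk{1...4} suffix is MinIO's ellipses notation for a numeric range of drive paths (this testdata file is deliberately invalid erasure-pool config). A minimal sketch of expanding a single {start...end} range, as an illustration of the notation only, with a hypothetical expandEllipses helper rather than MinIO's real parser:

package main

import (
    "fmt"
    "regexp"
    "strconv"
)

// expandEllipses expands a single {start...end} numeric range in an
// endpoint string. Hypothetical helper for illustration only; MinIO's
// real ellipses parser handles more patterns than this.
func expandEllipses(endpoint string) []string {
    re := regexp.MustCompile(`\{(\d+)\.\.\.(\d+)\}`)
    m := re.FindStringSubmatchIndex(endpoint)
    if m == nil {
        return []string{endpoint}
    }
    start, _ := strconv.Atoi(endpoint[m[2]:m[3]])
    end, _ := strconv.Atoi(endpoint[m[4]:m[5]])
    out := make([]string, 0, end-start+1)
    for i := start; i <= end; i++ {
        out = append(out, endpoint[:m[0]]+strconv.Itoa(i)+endpoint[m[1]:])
    }
    return out
}

func main() {
    // Prints the four expanded endpoints disk1/ through disk4/.
    fmt.Println(expandEllipses("https://server1-pool1:9000/mnt/disk{1...4}/"))
}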
Last Modified: Thu Dec 07 09:33:56 UTC 2023 - 863 bytes
buildscripts/verify-healing-with-root-disks.sh
    done
done
}

# Prepare fake disks with losetup
function prepare_block_devices() {
    set -e
    mkdir -p ${WORK_DIR}/disks/ ${WORK_DIR}/mnt/
    sudo modprobe loop
    for i in 1 2 3 4; do
        # Back each fake disk with a ~2 GB image file
        dd if=/dev/zero of=${WORK_DIR}/disks/img.${i} bs=1M count=2000
        # Attach the image to a free loop device and format it
        device=$(sudo losetup --find --show ${WORK_DIR}/disks/img.${i})
        sudo mkfs.ext4 -F ${device}
        mkdir -p ${WORK_DIR}/mnt/disk${i}/
        sudo mount ${device} ${WORK_DIR}/mnt/disk${i}/
Last Modified: Fri May 26 05:07:25 UTC 2023 - 2.2K bytes
cmd/erasure-metadata-utils_test.go
if shuffledDisks[0] != disks[8] || shuffledDisks[1] != disks[7] ||
    shuffledDisks[2] != disks[9] || shuffledDisks[3] != disks[6] ||
    shuffledDisks[4] != disks[10] || shuffledDisks[5] != disks[5] ||
    shuffledDisks[6] != disks[11] || shuffledDisks[7] != disks[4] ||
    shuffledDisks[8] != disks[12] || shuffledDisks[9] != disks[3] ||
    shuffledDisks[10] != disks[13] || shuffledDisks[11] != disks[2] ||
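The interleaved pattern asserted above (8, 7, 9, 6, 10, 5, ...) is what a fixed distribution produces rather than a random shuffle. A hedged sketch of re-ordering a slice by a 1-based distribution, the shape such a shuffle typically takes (hypothetical stand-in, not MinIO's shuffleDisks itself):

package main

import "fmt"

// shuffleByDistribution places items[i] at position distribution[i]-1,
// the usual shape of an erasure-set distribution shuffle.
func shuffleByDistribution[T any](items []T, distribution []int) []T {
    shuffled := make([]T, len(items))
    for index, blockIndex := range distribution {
        shuffled[blockIndex-1] = items[index] // distribution is 1-based
    }
    return shuffled
}

func main() {
    disks := []string{"a", "b", "c", "d"}
    fmt.Println(shuffleByDistribution(disks, []int{2, 4, 1, 3})) // [c a d b]
}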
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 7.3K bytes
cmd/erasure.go
    }
}()

// Restrict parallelism for disk usage scanner
// up to GOMAXPROCS if GOMAXPROCS is < len(disks)
maxProcs := runtime.GOMAXPROCS(0)
if maxProcs < len(disks) {
    disks = disks[:maxProcs]
}

// Start one scanner per disk
var wg sync.WaitGroup
wg.Add(len(disks))
for i := range disks {
    go func(i int) {
        defer wg.Done()
        disk := disks[i]
        for bucket := range bucketCh {
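The same capping pattern in isolation: bound the worker count by GOMAXPROCS, then fan bucket names out over a channel to one goroutine per remaining disk. A self-contained sketch with illustrative names (the disks and buckets here are made up):

package main

import (
    "fmt"
    "runtime"
    "sync"
)

func main() {
    disks := []string{"disk1", "disk2", "disk3", "disk4"}

    // Cap the number of workers at GOMAXPROCS, as cmd/erasure.go does.
    workers := len(disks)
    if maxProcs := runtime.GOMAXPROCS(0); maxProcs < workers {
        workers = maxProcs
    }

    buckets := make(chan string)
    var wg sync.WaitGroup
    wg.Add(workers)
    for i := 0; i < workers; i++ {
        go func(disk string) {
            defer wg.Done()
            for bucket := range buckets {
                fmt.Println("scanning", bucket, "on", disk) // stand-in for the real scan
            }
        }(disks[i])
    }

    for _, b := range []string{"photos", "logs", "backups"} {
        buckets <- b
    }
    close(buckets)
    wg.Wait()
}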
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 16.1K bytes
cmd/global-heal.go
for id, disks := range indexed {
    ss := madmin.SetStatus{
        ID:        id,
        SetIndex:  disks[0].SetIndex,
        PoolIndex: disks[0].PoolIndex,
    }
    for _, disk := range disks {
        ss.Disks = append(ss.Disks, disk)
        if disk.Healing {
            ss.HealStatus = "Healing"
            ss.HealPriority = "high"
            status.HealDisks = append(status.HealDisks, disk.Endpoint)
        }
    }
    sortDisks(ss.Disks)
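The indexed map iterated above groups disks by the set they belong to. A hedged sketch of building such an index, with a hypothetical Disk type and key shape standing in for the real ones:

// Hypothetical Disk carrying just the fields the grouping needs;
// the real code works with madmin disk values.
type Disk struct {
    Endpoint  string
    PoolIndex int
    SetIndex  int
}

type setID struct{ Pool, Set int }

// indexDisks groups disks by their (pool, set) identity, the likely
// shape of the indexed map iterated in cmd/global-heal.go.
func indexDisks(disks []Disk) map[setID][]Disk {
    indexed := make(map[setID][]Disk)
    for _, d := range disks {
        key := setID{d.PoolIndex, d.SetIndex}
        indexed[key] = append(indexed[key], d)
    }
    return indexed
}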
Last Modified: Fri Apr 04 13:49:12 UTC 2025 - 16.2K bytes
cmd/prepare-storage.go
    return nil, err
}

// All disks report unformatted; we should initialize everyone.
if shouldInitErasureDisks(sErrs) && firstDisk {
    logger.Info("Formatting %s pool, %v set(s), %v drives per set.",
        humanize.Ordinal(poolCount), setCount, setDriveCount)
    // Initialize erasure code format on disks
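shouldInitErasureDisks gates formatting on every disk reporting back unformatted; a minimal sketch of that all-or-nothing check, assuming a hypothetical errUnformattedDisk sentinel:

import "errors"

// Hypothetical sentinel; MinIO defines its own unformatted-disk error.
var errUnformattedDisk = errors.New("unformatted disk found")

// shouldInit reports whether every disk in the set came back
// unformatted, i.e. this is a fresh cluster that is safe to format.
func shouldInit(sErrs []error) bool {
    if len(sErrs) == 0 {
        return false
    }
    for _, err := range sErrs {
        if !errors.Is(err, errUnformattedDisk) {
            return false
        }
    }
    return true
}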
Last Modified: Fri Jul 12 20:51:54 UTC 2024 - 11.1K bytes
cmd/erasure-metadata-utils.go
func readAllFileInfo(ctx context.Context, disks []StorageAPI, origbucket string, bucket, object, versionID string, readData, healing bool) ([]FileInfo, []error) {
    metadataArray := make([]FileInfo, len(disks))
    opts := ReadOptions{
        ReadData: readData,
        Healing:  healing,
    }
    g := errgroup.WithNErrs(len(disks))
    // Read `xl.meta` in parallel across disks.
    for index := range disks {
        g.Go(func() (err error) {
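errgroup.WithNErrs here records one error per disk index, so the returned []FileInfo and []error line up position by position. A stripped-down stand-in for that pattern (not the real minio errgroup API):

import "sync"

// nErrGroup records one error per index; a minimal stand-in for the
// per-disk error group used in readAllFileInfo.
type nErrGroup struct {
    wg   sync.WaitGroup
    errs []error
}

func withNErrs(n int) *nErrGroup {
    return &nErrGroup{errs: make([]error, n)}
}

func (g *nErrGroup) Go(index int, fn func() error) {
    g.wg.Add(1)
    go func() {
        defer g.wg.Done()
        g.errs[index] = fn() // each goroutine writes only its own slot
    }()
}

func (g *nErrGroup) Wait() []error {
    g.wg.Wait()
    return g.errs
}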
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 11.7K bytes
cmd/erasure-healing-common.go
        } else {
            dataErrsByPart[p][i] = verifyResp.Results[p]
        }
    }
}

// Build dataErrs by disk from dataErrs by part
for part, disks := range dataErrsByPart {
    for disk := range disks {
        dataErrsByDisk[disk][part] = dataErrsByPart[part][disk]
    }
}
return
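The double loop above is a plain matrix transpose: the [part][disk] error grid is flipped to [disk][part]. The same operation in isolation, as a generic sketch:

// transpose flips a rows-by-cols matrix to cols-by-rows; here,
// per-part error rows become per-disk rows.
func transpose(byPart [][]error) [][]error {
    if len(byPart) == 0 {
        return nil
    }
    byDisk := make([][]error, len(byPart[0]))
    for disk := range byDisk {
        byDisk[disk] = make([]error, len(byPart))
    }
    for part := range byPart {
        for disk := range byPart[part] {
            byDisk[disk][part] = byPart[part][disk]
        }
    }
    return byDisk
}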
Last Modified: Sun Mar 30 00:56:02 UTC 2025 - 12K bytes
cmd/erasure-common.go
// Based on the random shuffling, return the randomized disks.
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for _, i := range r.Perm(len(disks)) {
    if disks[i] != nil && disks[i].IsLocal() {
        newDisks = append(newDisks, disks[i])
    }
}
return newDisks
}

func (er erasureObjects) getLocalDisks() (newDisks []StorageAPI) {
    disks := er.getDisks()
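rand.Perm(n) yields a random permutation of 0..n-1, so the loop visits every disk exactly once in random order while the filter keeps only live local disks. A self-contained sketch of that visit-once shuffle (the disk names are illustrative):

package main

import (
    "fmt"
    "math/rand"
    "time"
)

func main() {
    disks := []string{"disk1", "disk2", "disk3", "disk4"}

    // Visit each index exactly once, in random order.
    r := rand.New(rand.NewSource(time.Now().UnixNano()))
    var shuffled []string
    for _, i := range r.Perm(len(disks)) {
        shuffled = append(shuffled, disks[i])
    }
    fmt.Println(shuffled)
}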
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 2.3K bytes
cmd/metrics-realtime.go
    m.ByDisk = make(map[string]madmin.DiskMetric)
    aggr := madmin.DiskMetric{
        CollectedAt: time.Now(),
    }
    for name, disk := range collectLocalDisksMetrics(opts.disks) {
        m.ByDisk[name] = disk
        aggr.Merge(&disk)
    }
    m.Aggregated.Disk = &aggr
}
if types.Contains(madmin.MetricsScanner) {
    metrics := globalScannerMetrics.report()
    m.Aggregated.Scanner = &metrics
}
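Each disk's metrics are kept individually in ByDisk while Merge folds them into one aggregate. A minimal sketch of that keep-and-fold pattern, with a hypothetical diskMetric in place of madmin.DiskMetric:

// Hypothetical metric; madmin.DiskMetric carries many more fields.
type diskMetric struct {
    IOPS   int
    UsedGB int
}

// Merge folds another disk's numbers into this aggregate.
func (d *diskMetric) Merge(other *diskMetric) {
    d.IOPS += other.IOPS
    d.UsedGB += other.UsedGB
}

// aggregate keeps per-disk metrics intact while folding each one
// into a single cluster-wide total, as metrics-realtime.go does.
func aggregate(byDisk map[string]diskMetric) diskMetric {
    var aggr diskMetric
    for _, m := range byDisk {
        aggr.Merge(&m) // Merge reads synchronously, so &m is safe here
    }
    return aggr
}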
Last Modified: Sat Jun 01 05:16:24 UTC 2024 - 6.3K bytes