Results 1-10 of 53 for "Drives"
cmd/peer-s3-server.go
			Deleted: v.Created,
		})
	}
	return true
})
return buckets, nil
}

func cloneDrives(drives map[string]StorageAPI) []StorageAPI {
	copyDrives := make([]StorageAPI, 0, len(drives))
	for _, drive := range drives {
		copyDrives = append(copyDrives, drive)
	}
	return copyDrives
}

func getBucketInfoLocal(ctx context.Context, bucket string, opts BucketOptions) (BucketInfo, error) {
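The cloneDrives helper above snapshots the shared drives map into a slice, so callers can iterate a stable copy without touching the map again. A minimal, generalized sketch of the same pattern, using only the standard library (names here are illustrative, not MinIO's):

```go
package main

import "fmt"

// cloneValues copies a map's values into a slice so the caller can
// iterate a snapshot instead of the live (possibly lock-guarded) map.
func cloneValues[K comparable, V any](m map[K]V) []V {
	out := make([]V, 0, len(m))
	for _, v := range m {
		out = append(out, v)
	}
	return out
}

func main() {
	drives := map[string]string{"d1": "/mnt/a", "d2": "/mnt/b"}
	fmt.Println(len(cloneValues(drives))) // 2
}
```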
cmd/format-erasure.go
return fmt.Errorf("%s drive is already being used in another erasure deployment. (Number of drives specified: %d but the number of drives found in the %s drive's format.json: %d)",
	disks[i], len(formats), humanize.Ordinal(i+1),
	len(formatErasure.Erasure.Sets)*len(formatErasure.Erasure.Sets[0]))
}
if len(formatErasure.Erasure.Sets[0]) != setDriveCount {
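The check compares the drive count given on the command line against sets × drives-per-set recorded in an existing format.json. A minimal sketch of that consistency check, assuming (as the snippet's indexing suggests) that the format records erasure sets as a slice of equal-length drive lists; the types and names below are stand-ins, not MinIO's:

```go
package main

import "fmt"

type formatErasure struct {
	Sets [][]string // Sets[i][j] = ID of drive j in erasure set i
}

// checkDriveCount verifies the number of drives specified matches
// what the on-disk format expects: number of sets * drives per set.
func checkDriveCount(f formatErasure, specified int) error {
	expected := len(f.Sets) * len(f.Sets[0])
	if specified != expected {
		return fmt.Errorf("drive count mismatch: specified %d, format.json expects %d", specified, expected)
	}
	return nil
}

func main() {
	f := formatErasure{Sets: [][]string{{"a", "b", "c", "d"}, {"e", "f", "g", "h"}}}
	fmt.Println(checkDriveCount(f, 8)) // <nil>
	fmt.Println(checkDriveCount(f, 4)) // drive count mismatch
}
```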
cmd/erasure-healing.go
	// Bucket or prefix/directory not found
	hr.Before.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateMissing}
	hr.After.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateMissing}
default:
	hr.Before.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateCorrupt}
	hr.After.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateCorrupt}
}
}
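Heal results record each drive's state twice: a snapshot before healing and one after. An illustrative sketch of that before/after bookkeeping; the struct and state constants only mirror the shape of madmin.HealDriveInfo from the snippet and are local stand-ins, not the real madmin API:

```go
package main

import "fmt"

type HealDriveInfo struct {
	Endpoint string
	State    string
}

const (
	DriveStateMissing = "missing"
	DriveStateOk      = "ok"
)

func main() {
	// Record the pre-heal state of every drive.
	before := []HealDriveInfo{{Endpoint: "http://node1/d1", State: DriveStateMissing}}

	// Start the "after" view as a copy of "before", then update it
	// as healing makes progress on individual drives.
	after := make([]HealDriveInfo, len(before))
	copy(after, before)
	after[0].State = DriveStateOk

	fmt.Println(before[0].State, "->", after[0].State) // missing -> ok
}
```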
cmd/fmt-gen.go
if deploymentID != "" {
	newFormat.ID = deploymentID
}
drive := pool.Endpoints[i*setDriveCount+j]
fmtBytes, err := json.Marshal(newFormat)
if err != nil {
	//nolint:gocritic
	log.Fatalf("failed to marshal format.json for %s: %v", drive.String(), err)
}
fmtJSON := filepath.Join(drive.Host, drive.Path, minioMetaBucket, "format.json")
embedFileInZip(fmtZipW, fmtJSON, fmtBytes, 0o600)
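embedFileInZip is a MinIO helper; the underlying operation, marshaling a value to JSON and writing it into a zip archive under a per-drive path with a given file mode, can be done with just the standard library's archive/zip. A stand-in sketch (embedJSON and the paths are illustrative):

```go
package main

import (
	"archive/zip"
	"encoding/json"
	"log"
	"os"
	"path"
)

// embedJSON marshals v and stores it in the zip archive under name,
// preserving the requested file mode in the zip entry's header.
func embedJSON(zw *zip.Writer, name string, v any, mode os.FileMode) error {
	data, err := json.Marshal(v)
	if err != nil {
		return err
	}
	hdr := &zip.FileHeader{Name: name, Method: zip.Deflate}
	hdr.SetMode(mode)
	w, err := zw.CreateHeader(hdr)
	if err != nil {
		return err
	}
	_, err = w.Write(data)
	return err
}

func main() {
	f, err := os.Create("formats.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	zw := zip.NewWriter(f)
	defer zw.Close()

	format := map[string]string{"id": "deployment-1234"}
	// One format.json per drive path, as in the snippet above.
	if err := embedJSON(zw, path.Join("node1", "mnt", "drive1", "format.json"), format, 0o600); err != nil {
		log.Fatal(err)
	}
}
```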
cmd/server-main_test.go
ctx, cancel := context.WithCancel(t.Context())
defer cancel()

// Tests for ErasureSD object layer.
nDisks := 1
disks, err := getRandomDisks(nDisks)
if err != nil {
	t.Fatal("Failed to create drives for the backend")
}
defer removeRoots(disks)

obj, err := newObjectLayer(ctx, mustGetPoolEndpoints(0, disks...))
if err != nil {
	t.Fatal("Unexpected object layer initialization error", err)
}
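getRandomDisks and removeRoots are MinIO test helpers that create and tear down throwaway directories standing in for drives. A minimal sketch of the same fixture pattern using only the standard testing package, where t.TempDir() handles cleanup automatically (save as a _test.go file; names are illustrative):

```go
package main

import "testing"

// makeDisks creates n temporary directories to act as backend drives.
// t.TempDir() removes each directory when the test finishes, so no
// explicit removeRoots-style cleanup is needed.
func makeDisks(t *testing.T, n int) []string {
	t.Helper()
	disks := make([]string, n)
	for i := range disks {
		disks[i] = t.TempDir()
	}
	return disks
}

func TestSingleDriveSetup(t *testing.T) {
	disks := makeDisks(t, 1)
	if len(disks) != 1 {
		t.Fatal("expected one backend drive")
	}
}
```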
cmd/peer-s3-client.go
	SetCount: -1, // explicitly set an invalid value -1, for bucket heal scenario
}
for i, err := range errs {
	if err == nil {
		res.Before.Drives = append(res.Before.Drives, healBucketResults[i].Before.Drives...)
		res.After.Drives = append(res.After.Drives, healBucketResults[i].After.Drives...)
	}
}
return res, nil
}

// ListBuckets lists buckets across all nodes and returns a consistent view:
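The loop fans in per-peer heal results, skipping any peer whose call failed. A minimal sketch of that aggregation pattern with illustrative stand-in types:

```go
package main

import (
	"errors"
	"fmt"
)

type driveInfo struct{ Endpoint string }

// mergeDrives concatenates per-peer result slices, dropping results
// from any peer whose RPC returned an error.
func mergeDrives(perPeer [][]driveInfo, errs []error) []driveInfo {
	var merged []driveInfo
	for i, err := range errs {
		if err != nil {
			continue // peer i failed; skip its (possibly partial) results
		}
		merged = append(merged, perPeer[i]...)
	}
	return merged
}

func main() {
	perPeer := [][]driveInfo{{{"n1/d1"}}, {{"n2/d1"}, {"n2/d2"}}}
	errs := []error{nil, errors.New("peer offline")}
	fmt.Println(len(mergeDrives(perPeer, errs))) // 1: only peer 0 contributed
}
```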
cmd/endpoint-ellipses.go
msg := fmt.Sprintf("Incorrect number of endpoints provided %s, number of drives %d is not divisible by any supported erasure set sizes %d",
	args, commonSize, setSizes)
return nil, config.ErrInvalidNumberOfErasureEndpoints(nil).Msg(msg)
}

var setSize uint64
// A custom set drive count allows overriding the automatic distribution;
// only meant for further optimizing drive distribution.
if setDriveCount > 0 {
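The error fires when the total drive count does not divide evenly into any supported erasure set size. A minimal sketch of that divisibility rule, assuming the documented MinIO range of 2-16 drives per set; the real endpoint-ellipses logic additionally weighs symmetry across ellipses patterns, which this omits:

```go
package main

import "fmt"

// setSizes lists the candidate erasure set sizes (2..16 drives per set).
var setSizes = []uint64{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}

// possibleSetSizes returns every supported set size that divides the
// total drive count evenly; an empty result means the layout is invalid.
func possibleSetSizes(commonSize uint64) []uint64 {
	var out []uint64
	for _, s := range setSizes {
		if commonSize%s == 0 {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	fmt.Println(possibleSetSizes(16)) // [2 4 8 16]
	fmt.Println(possibleSetSizes(7))  // [7]
	fmt.Println(possibleSetSizes(1))  // [] -> no valid erasure set size
}
```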
cmd/erasure.go
}

rootDiskCount := 0
for _, di := range disksInfo {
	if di.RootDisk {
		rootDiskCount++
	}
}

// Count offline disks as well to ensure consistent
// reporting of offline drives on local setups.
if len(disksInfo) == (rootDiskCount + offlineDisks.Sum()) {
	// Success.
	return onlineDisks, offlineDisks
}

// Root disk should be considered offline
for i := range disksInfo {
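An illustrative sketch of that guard as I read it: when every disk is either a root disk or already offline (a purely local setup), the current state is accepted as-is; otherwise any drive that landed on the OS root volume is demoted to offline rather than reported as healthy storage. Types and names below are simplified stand-ins:

```go
package main

import "fmt"

type diskInfo struct {
	Endpoint string
	RootDisk bool
	Offline  bool
}

// markRootDisksOffline returns the resulting offline count. If all
// disks are root disks or offline already, nothing is demoted.
func markRootDisksOffline(disks []diskInfo) int {
	rootCount, offline := 0, 0
	for _, d := range disks {
		if d.RootDisk {
			rootCount++
		}
		if d.Offline {
			offline++
		}
	}
	if len(disks) == rootCount+offline {
		return offline // local setup: accept the current state
	}
	// Mixed layout: root disks should be considered offline.
	marked := 0
	for i := range disks {
		if disks[i].RootDisk && !disks[i].Offline {
			disks[i].Offline = true
			marked++
		}
	}
	return offline + marked
}

func main() {
	disks := []diskInfo{{"d1", true, false}, {"d2", false, false}}
	fmt.Println(markRootDisksOffline(disks)) // 1: the root disk is demoted
}
```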
cmd/erasure-sets.go
}

// Fetch all the drive info status.
beforeDrives := formatsToDrivesInfo(s.endpoints.Endpoints, formats, sErrs)
res.After.Drives = make([]madmin.HealDriveInfo, len(beforeDrives))
res.Before.Drives = make([]madmin.HealDriveInfo, len(beforeDrives))

// Seed the "after" drive state from the "before" state.
for k, v := range beforeDrives {
	res.Before.Drives[k] = v
	res.After.Drives[k] = v
}
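formatsToDrivesInfo itself is not shown in the results, but from its call site one plausible reading is: pair each endpoint with the outcome of reading its format file and derive an initial drive state from the error. A hypothetical sketch under that assumption; every name, state, and sentinel error below is illustrative, not MinIO's implementation:

```go
package main

import (
	"errors"
	"fmt"
)

type driveState string

const (
	stateOk      driveState = "ok"
	stateOffline driveState = "offline"
	stateMissing driveState = "missing"
)

var errUnformattedDisk = errors.New("unformatted disk")

// drivesInfo maps each endpoint to an initial state based on the
// error returned while reading that endpoint's format file.
func drivesInfo(endpoints []string, errs []error) map[string]driveState {
	out := make(map[string]driveState, len(endpoints))
	for i, ep := range endpoints {
		switch {
		case errs[i] == nil:
			out[ep] = stateOk
		case errors.Is(errs[i], errUnformattedDisk):
			out[ep] = stateMissing
		default:
			out[ep] = stateOffline
		}
	}
	return out
}

func main() {
	fmt.Println(drivesInfo(
		[]string{"/mnt/d1", "/mnt/d2"},
		[]error{nil, errUnformattedDisk},
	)) // map[/mnt/d1:ok /mnt/d2:missing]
}
```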
cmd/erasure-heal_test.go
func TestErasureHeal(t *testing.T) {
	for i, test := range erasureHealTests {
		// test case sanity check
		if test.offDisks < test.badStaleDisks {
			t.Fatalf("Test %d: Bad test case - number of stale drives cannot be less than number of badstale drives", i)
		}
		// create some test data
		setup, err := newErasureTestSetup(t, test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)
		if err != nil {
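A minimal sketch of the table-driven pattern above: each case carries its own parameters, and an impossible combination (more bad-stale drives than offline drives) fails fast before the real assertions run. Field names mirror the snippet; the values are made up (save as a _test.go file):

```go
package main

import "testing"

type healTest struct {
	dataBlocks    int
	disks         int
	offDisks      int
	badStaleDisks int
	blocksize     int64
}

var healTests = []healTest{
	{dataBlocks: 4, disks: 8, offDisks: 2, badStaleDisks: 1, blocksize: 1 << 20},
	{dataBlocks: 6, disks: 12, offDisks: 3, badStaleDisks: 3, blocksize: 1 << 20},
}

func TestHealTableSanity(t *testing.T) {
	for i, tc := range healTests {
		// Guard against self-contradictory test cases before running
		// any heal logic against them.
		if tc.offDisks < tc.badStaleDisks {
			t.Fatalf("Test %d: bad test case: offDisks (%d) < badStaleDisks (%d)",
				i, tc.offDisks, tc.badStaleDisks)
		}
	}
}
```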