Results 1 - 10 of 18 for GetDisks (1.85 sec)
cmd/erasure-object_test.go
if err != nil {
    t.Fatal(err)
}
erasureDisks := xl.getDisks()
z.serverPools[0].erasureDisksMu.Lock()
xl.getDisks = func() []StorageAPI {
    for i := range erasureDisks[:6] {
        erasureDisks[i] = newNaughtyDisk(erasureDisks[i], nil, errFaultyDisk)
    }
    return erasureDisks
}
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 38.3K bytes - Viewed (0)
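In the test above, xl.getDisks is a plain function field, so the test can swap it out and hand back disks wrapped with newNaughtyDisk to simulate faults. A minimal, self-contained sketch of that injection pattern (Disk, okDisk, faultyDisk, and store below are hypothetical stand-ins, not MinIO's actual types):

```go
package main

import (
	"errors"
	"fmt"
)

// Disk is a hypothetical stand-in for MinIO's StorageAPI interface.
type Disk interface {
	Read(path string) ([]byte, error)
}

// okDisk always succeeds.
type okDisk struct{}

func (okDisk) Read(string) ([]byte, error) { return []byte("data"), nil }

// faultyDisk wraps a Disk and always fails, playing the role of newNaughtyDisk.
type faultyDisk struct{ inner Disk }

func (faultyDisk) Read(string) ([]byte, error) { return nil, errors.New("faulty disk") }

// store keeps getDisks as a function field so tests can swap it out.
type store struct {
	getDisks func() []Disk
}

// readFirst returns the first successful read across the current disks.
func (s *store) readFirst(path string) ([]byte, error) {
	for _, d := range s.getDisks() {
		if b, err := d.Read(path); err == nil {
			return b, nil
		}
	}
	return nil, errors.New("all disks failed")
}

func main() {
	disks := []Disk{okDisk{}, okDisk{}, okDisk{}}
	s := &store{getDisks: func() []Disk { return disks }}

	// Test-style override: serve the same disks, but with the first two made faulty.
	s.getDisks = func() []Disk {
		out := append([]Disk(nil), disks...)
		for i := range out[:2] {
			out[i] = faultyDisk{out[i]}
		}
		return out
	}

	b, err := s.readFirst("obj")
	fmt.Println(string(b), err)
}
```

Because callers only ever go through the function field, the production path and the fault-injected path exercise exactly the same read logic.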
cmd/erasure.go
// erasureObjects - Implements ER object layer.
type erasureObjects struct {
    setDriveCount      int
    defaultParityCount int

    setIndex  int
    poolIndex int

    // getDisks returns list of storageAPIs.
    getDisks func() []StorageAPI

    // getLockers returns list of remote and local lockers.
    getLockers func() ([]dsync.NetLocker, string)

    // getEndpoints returns list of endpoint belonging this set.
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 16.1K bytes - Viewed (0)
cmd/erasure-healing_test.go
    return disk
}

// Remove 4 disks.
setDisks(nil, nil, nil, nil)

// Create delete marker under quorum.
objInfo, err := objLayer.DeleteObject(ctx, bucket, object, ObjectOptions{Versioned: true})
if err != nil {
    t.Fatal(err)
}

// Restore...
setDisks(orgDisks[:4]...)
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 48.5K bytes - Viewed (0)
cmd/erasure-healing-common_test.go
if err != nil {
    t.Fatalf("Failed to make a bucket %v", err)
}
object := "object"
data := bytes.Repeat([]byte("a"), smallFileThreshold*32)

z := obj.(*erasureServerPools)
erasureDisks, err := z.GetDisks(0, 0)
if err != nil {
    t.Fatal(err)
}

for i, test := range testCases {
    t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 22.4K bytes - Viewed (0)
cmd/erasure-common.go
package cmd

import (
    "context"
    "math/rand"
    "sync"
    "time"
)

func (er erasureObjects) getOnlineDisks() (newDisks []StorageAPI) {
    disks := er.getDisks()
    var wg sync.WaitGroup
    var mu sync.Mutex
    r := rand.New(rand.NewSource(time.Now().UnixNano()))
    for _, i := range r.Perm(len(disks)) {
        wg.Add(1)
        go func() {
            defer wg.Done()
            if disks[i] == nil {
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 2.3K bytes - Viewed (0)
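getOnlineDisks above probes the drives concurrently, iterating them in a random order from rand.Perm and guarding the shared result slice with a mutex. A rough sketch of that pattern, with a hypothetical Disk type and a trivial online flag standing in for the real health check:

```go
package main

import (
	"fmt"
	"math/rand"
	"sync"
)

// Disk is a hypothetical stand-in for StorageAPI.
type Disk struct {
	id     int
	online bool
}

// onlineDisks probes every disk concurrently, in random order,
// and collects the ones that respond.
func onlineDisks(disks []*Disk) []*Disk {
	var (
		wg  sync.WaitGroup
		mu  sync.Mutex
		out []*Disk
	)
	for _, i := range rand.Perm(len(disks)) {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			d := disks[i]
			if d == nil || !d.online { // stand-in for a real health check
				return
			}
			mu.Lock()
			out = append(out, d)
			mu.Unlock()
		}(i)
	}
	wg.Wait()
	return out
}

func main() {
	disks := []*Disk{{id: 1, online: true}, {id: 2, online: false}, nil, {id: 4, online: true}}
	fmt.Println(len(onlineDisks(disks)), "disks online")
}
```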
cmd/erasure-object.go
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Sun Sep 07 16:13:09 UTC 2025 - 80.4K bytes - Viewed (0)
cmd/erasure-multipart.go
defer func() {
    if errors.Is(err, errFileNotFound) {
        err = errUploadIDNotFound
    }
}()

uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
storageDisks := er.getDisks()

// Read metadata associated with the object from all disks.
partsMetadata, errs := readAllFileInfo(ctx, storageDisks, bucket, minioMetaMultipartBucket, uploadIDPath, "", false, false)
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Sun Sep 07 16:13:09 UTC 2025 - 47.3K bytes - Viewed (0)
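The multipart path above fans a metadata read out to every disk with readAllFileInfo, getting back per-disk results and errors that later quorum checks can weigh. A simplified sketch of that fan-out shape (the Disk interface, memDisk, and readAllMeta are illustrative assumptions, not the real readAllFileInfo signature):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// Disk is a hypothetical stand-in for StorageAPI.
type Disk interface {
	ReadMeta(path string) (string, error)
}

// memDisk serves metadata from an in-memory map.
type memDisk struct{ meta map[string]string }

func (d memDisk) ReadMeta(path string) (string, error) {
	if m, ok := d.meta[path]; ok {
		return m, nil
	}
	return "", errors.New("file not found")
}

// readAllMeta reads the same path from every disk concurrently and
// returns results and errors index-aligned with the input slice.
func readAllMeta(disks []Disk, path string) ([]string, []error) {
	metas := make([]string, len(disks))
	errs := make([]error, len(disks))
	var wg sync.WaitGroup
	for i, d := range disks {
		wg.Add(1)
		go func(i int, d Disk) {
			defer wg.Done()
			if d == nil {
				errs[i] = errors.New("disk offline")
				return
			}
			metas[i], errs[i] = d.ReadMeta(path)
		}(i, d)
	}
	wg.Wait()
	return metas, errs
}

func main() {
	disks := []Disk{
		memDisk{meta: map[string]string{"obj": "v1"}},
		nil,
		memDisk{meta: map[string]string{}},
	}
	metas, errs := readAllMeta(disks, "obj")
	fmt.Println(metas, errs)
}
```

Keeping results and errors index-aligned with the disk slice is what lets quorum logic reason about which drives agreed and which failed.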
cmd/erasure-healing.go
dryRun := opts.DryRun
scanMode := opts.ScanMode

storageDisks := er.getDisks()
storageEndpoints := er.getEndpoints()

defer func() {
    er.auditHealObject(ctx, bucket, object, versionID, result, err)
}()

if globalTrace.NumSubscribers(madmin.TraceHealing) > 0 {
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 34.6K bytes - Viewed (0)
cmd/erasure-sets.go
        copy(eps, s.endpoints.Endpoints[setIndex*s.setDriveCount:setIndex*s.setDriveCount+s.setDriveCount])
        return eps
    }
}

// GetDisks returns a closure for a given set, which provides list of disks per set.
func (s *erasureSets) GetDisks(setIndex int) func() []StorageAPI {
    return func() []StorageAPI {
        s.erasureDisksMu.RLock()
        defer s.erasureDisksMu.RUnlock()
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 37K bytes - Viewed (1)
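GetDisks hands each set a closure instead of a fixed slice, so every call re-reads the set's current drives under the read lock and naturally picks up replaced or reconnected disks. A minimal sketch of that idea with hypothetical sets and Disk types:

```go
package main

import (
	"fmt"
	"sync"
)

// Disk is a hypothetical stand-in for StorageAPI.
type Disk struct{ id int }

// sets mimics per-set disk bookkeeping guarded by an RWMutex.
type sets struct {
	mu    sync.RWMutex
	disks [][]*Disk // disks[setIndex] is the current drive list for that set
}

// GetDisks returns a closure for a given set; each call snapshots
// the set's disks under the read lock.
func (s *sets) GetDisks(setIndex int) func() []*Disk {
	return func() []*Disk {
		s.mu.RLock()
		defer s.mu.RUnlock()
		return append([]*Disk(nil), s.disks[setIndex]...)
	}
}

func main() {
	s := &sets{disks: [][]*Disk{{{1}, {2}}}}
	getDisks := s.GetDisks(0)

	fmt.Println(len(getDisks())) // 2

	// A reconnected drive shows up on the next call without re-wiring callers.
	s.mu.Lock()
	s.disks[0] = append(s.disks[0], &Disk{3})
	s.mu.Unlock()

	fmt.Println(len(getDisks())) // 3
}
```

Returning a copy taken under the read lock keeps callers from racing with whichever goroutine swaps drives in and out of the set.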
cmd/erasure-metadata-utils_test.go
    testShuffleDisks(t, z)
}

// Test shuffleDisks which returns shuffled slice of disks for their actual distribution.
func testShuffleDisks(t *testing.T, z *erasureServerPools) {
    disks := z.serverPools[0].GetDisks(0)()
    distribution := []int{16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15}
    shuffledDisks := shuffleDisks(disks, distribution)
    // From the "distribution" above you can notice that:
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 7.3K bytes - Viewed (0)
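The test above feeds shuffleDisks a 16-entry distribution and checks where each drive lands. A small sketch of that rearrangement, assuming the distribution lists the 1-based target slot for each input disk (a reading consistent with the test, not verified against the real helper):

```go
package main

import "fmt"

// shuffleByDistribution places input[i] at slot distribution[i]-1,
// treating distribution as 1-based target positions.
func shuffleByDistribution(input []string, distribution []int) []string {
	if distribution == nil {
		return input
	}
	out := make([]string, len(input))
	for i := range input {
		out[distribution[i]-1] = input[i]
	}
	return out
}

func main() {
	disks := []string{"d1", "d2", "d3", "d4"}
	distribution := []int{4, 2, 1, 3} // d1 -> slot 4, d2 -> slot 2, ...
	fmt.Println(shuffleByDistribution(disks, distribution))
	// Output: [d3 d2 d4 d1]
}
```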