Results 1 - 10 of 145 for disk (0.13 sec)
cmd/erasure-sets.go
}
// An online-disk means it's a valid disk, but it may be a re-connected disk;
// we verify that here based on LastConn(), however we make sure to avoid
// putting it back into the s.erasureDisks by re-placing the disk again.
_, setIndex, _ := cdisk.GetDiskLoc()
if setIndex != -1 {
    continue
}
}
if cdisk != nil {
    // Close previous offline disk.
    cdisk.Close()
}
wg.Add(1)
cmd/erasure.go
    }
}()

// Restrict parallelism for disk usage scanner
// up to GOMAXPROCS if GOMAXPROCS is < len(disks)
maxProcs := runtime.GOMAXPROCS(0)
if maxProcs < len(disks) {
    disks = disks[:maxProcs]
}

// Start one scanner per disk
var wg sync.WaitGroup
wg.Add(len(disks))
for i := range disks {
    go func(i int) {
        defer wg.Done()
        disk := disks[i]
        for bucket := range bucketCh {
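The pattern above — cap the number of scanner goroutines at GOMAXPROCS, then have each worker drain a shared channel of buckets — can be sketched standalone. Everything below (disk paths, bucket names) is invented for illustration:

package main

import (
    "fmt"
    "runtime"
    "sync"
)

func main() {
    disks := []string{"/mnt/d1", "/mnt/d2", "/mnt/d3", "/mnt/d4"}

    // Cap the number of scanner goroutines at GOMAXPROCS, as above.
    if maxProcs := runtime.GOMAXPROCS(0); maxProcs < len(disks) {
        disks = disks[:maxProcs]
    }

    // A shared channel of work items; every scanner drains it.
    bucketCh := make(chan string, 8)
    for _, b := range []string{"photos", "logs", "backups"} {
        bucketCh <- b
    }
    close(bucketCh)

    var wg sync.WaitGroup
    wg.Add(len(disks))
    for i := range disks {
        go func(i int) {
            defer wg.Done()
            for bucket := range bucketCh {
                fmt.Printf("scanning bucket %q on disk %s\n", bucket, disks[i])
            }
        }(i)
    }
    wg.Wait()
}

Truncating the disk slice rather than queueing keeps the scanner from oversubscribing the CPU while still touching every bucket, since all workers share one channel.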
cmd/erasure-healing.go
var wg sync.WaitGroup
// Remove versions in bulk for each disk
for index, disk := range storageDisks {
    if disk == nil {
        continue
    }
    wg.Add(1)
    go func(index int, disk StorageAPI) {
        defer wg.Done()
        _ = disk.Delete(ctx, bucket, object, DeleteOptions{
            Recursive: false,
            Immediate: false,
        })
    }(index, disk)
}
wg.Wait()
}
}
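This WaitGroup fan-out recurs throughout these files. One detail worth copying: index and disk are passed to the goroutine as arguments, so each goroutine gets its own copies — before Go 1.22, range variables were reused across iterations and capturing them directly was a classic data race. A minimal standalone sketch (fakeDisk and its Delete method are invented):

package main

import (
    "fmt"
    "sync"
)

// fakeDisk is an invented stand-in for a storage backend.
type fakeDisk struct{ name string }

func (d *fakeDisk) Delete(path string) error {
    fmt.Printf("deleting %s on %s\n", path, d.name)
    return nil
}

func main() {
    disks := []*fakeDisk{{"d1"}, nil, {"d3"}} // nil models an offline disk

    var wg sync.WaitGroup
    for i, disk := range disks {
        if disk == nil {
            continue // skip offline disks, as in the excerpt
        }
        wg.Add(1)
        // Passing i and disk as arguments gives each goroutine its own
        // copies; before Go 1.22 the range variables were shared.
        go func(i int, disk *fakeDisk) {
            defer wg.Done()
            _ = disk.Delete(fmt.Sprintf("bucket/object-v%d", i))
        }(i, disk)
    }
    wg.Wait()
}

wg.Wait() blocks until every per-disk delete returns, which lets the caller treat the bulk removal as a single synchronous step.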
cmd/background-newdisks-heal-ops.go
// The disk ID will be validated against the loaded one.
func loadHealingTracker(ctx context.Context, disk StorageAPI) (*healingTracker, error) {
    if disk == nil {
        return nil, errors.New("loadHealingTracker: nil drive given")
    }
    diskID, err := disk.GetDiskID()
    if err != nil {
        return nil, err
    }
    b, err := disk.ReadAll(ctx, minioMetaBucket,
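The loader's shape — nil-guard, fetch the drive's ID, read a small tracker file, validate the stored ID against the live one — is a generic pattern for per-drive state. A self-contained sketch; the Drive interface, JSON encoding, and file path here are illustrative stand-ins, not MinIO's actual types:

package main

import (
    "encoding/json"
    "errors"
    "fmt"
)

// Drive is an invented stand-in for StorageAPI, just enough for this sketch.
type Drive interface {
    ID() (string, error)
    ReadAll(path string) ([]byte, error)
}

// tracker is a toy healing tracker; the real one carries much more state.
type tracker struct {
    DiskID string `json:"diskID"`
}

// loadTracker mirrors the excerpt's shape: guard against a nil drive,
// fetch its ID, read the tracker file, and validate the stored ID.
func loadTracker(d Drive) (*tracker, error) {
    if d == nil {
        return nil, errors.New("loadTracker: nil drive given")
    }
    id, err := d.ID()
    if err != nil {
        return nil, err
    }
    b, err := d.ReadAll(".healing.bin") // illustrative path
    if err != nil {
        return nil, err
    }
    var t tracker
    if err := json.Unmarshal(b, &t); err != nil {
        return nil, err
    }
    if t.DiskID != id {
        return nil, fmt.Errorf("tracker is for drive %s, got drive %s", t.DiskID, id)
    }
    return &t, nil
}

// memDrive is an in-memory Drive for demonstration.
type memDrive struct {
    id    string
    files map[string][]byte
}

func (m *memDrive) ID() (string, error) { return m.id, nil }
func (m *memDrive) ReadAll(p string) ([]byte, error) {
    b, ok := m.files[p]
    if !ok {
        return nil, errors.New("file not found")
    }
    return b, nil
}

func main() {
    d := &memDrive{
        id:    "disk-42",
        files: map[string][]byte{".healing.bin": []byte(`{"diskID":"disk-42"}`)},
    }
    t, err := loadTracker(d)
    fmt.Println(t, err)
}

The real tracker format and location differ; JSON and a flat path just keep the sketch short.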
cmd/erasure-multipart.go
var uploadIDs []string
var disk StorageAPI

disks := er.getOnlineLocalDisks()
if len(disks) == 0 {
    // If no local, get non-healing disks.
    var ok bool
    if disks, ok = er.getOnlineDisksWithHealing(false); !ok {
        disks = er.getOnlineDisks()
    }
}

for _, disk = range disks {
    if disk == nil {
        continue
    }
    if !disk.IsOnline() {
        continue
    }
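The excerpt implements a three-step fallback for choosing which disks to query: online local disks first, then online disks that are not healing, then any online disk. The same chain in a runnable sketch (the disk struct and selectDisks are invented):

package main

import "fmt"

// disk is an invented type for this sketch; the excerpt works on StorageAPI.
type disk struct {
    name    string
    online  bool
    local   bool
    healing bool
}

// selectDisks sketches the fallback chain from the excerpt: prefer online
// local disks, then online non-healing disks, then any online disk.
func selectDisks(all []disk) []disk {
    filter := func(pred func(disk) bool) []disk {
        var ds []disk
        for _, d := range all {
            if d.online && pred(d) {
                ds = append(ds, d)
            }
        }
        return ds
    }
    if ds := filter(func(d disk) bool { return d.local }); len(ds) > 0 {
        return ds
    }
    if ds := filter(func(d disk) bool { return !d.healing }); len(ds) > 0 {
        return ds
    }
    return filter(func(d disk) bool { return true })
}

func main() {
    disks := []disk{
        {name: "remote-1", online: true},
        {name: "remote-2", online: true, healing: true},
        {name: "local-1", online: false, local: true}, // offline: never chosen
    }
    fmt.Println(selectDisks(disks))
}

With the sample data, the only local disk is offline, so the chain falls through to the online non-healing set and returns remote-1.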
cmd/rebalance-admin.go
if err != nil {
    return r, err
}

// Compute disk usage percentage
si := z.StorageInfo(ctx, true)
diskStats := make([]struct {
    AvailableSpace uint64
    TotalSpace     uint64
}, len(z.serverPools))
for _, disk := range si.Disks {
    // Ignore invalid.
    if disk.PoolIndex < 0 || len(diskStats) <= disk.PoolIndex {
        // https://github.com/minio/minio/issues/16500
        continue
    }
}
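Aggregating per-pool capacity from a flat disk list is simple enough to show in isolation; the guard against negative or out-of-range PoolIndex mirrors the issue linked in the excerpt. Types and numbers below are made up:

package main

import "fmt"

type diskInfo struct {
    PoolIndex                  int
    AvailableSpace, TotalSpace uint64
}

func main() {
    numPools := 2
    disks := []diskInfo{
        {0, 400, 1000},
        {0, 100, 1000},
        {1, 900, 1000},
        {-1, 50, 100}, // invalid pool index: skipped, as in the excerpt
    }

    stats := make([]struct{ Available, Total uint64 }, numPools)
    for _, d := range disks {
        // Ignore invalid pool indices (see minio/minio#16500).
        if d.PoolIndex < 0 || d.PoolIndex >= len(stats) {
            continue
        }
        stats[d.PoolIndex].Available += d.AvailableSpace
        stats[d.PoolIndex].Total += d.TotalSpace
    }
    for i, s := range stats {
        used := s.Total - s.Available
        fmt.Printf("pool %d: %.1f%% used\n", i, 100*float64(used)/float64(s.Total))
    }
}

Pool 0 comes out 75.0% used and pool 1 comes out 10.0%; the disk with PoolIndex -1 contributes nothing.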
cmd/format-erasure.go
// relinquishes the underlying connection for all storage disks.
func closeStorageDisks(storageDisks ...StorageAPI) {
    var wg sync.WaitGroup
    for _, disk := range storageDisks {
        if disk == nil {
            continue
        }
        wg.Add(1)
        go func(disk StorageAPI) {
            defer wg.Done()
            disk.Close()
        }(disk)
    }
    wg.Wait()
}

// Initialize storage disks for each endpoint.
cmd/metacache-set.go
// However many we ask, versions must exist on ~50%
listingQuorum := (askDisks + 1) / 2

if askDisks > 0 && len(disks) > askDisks {
    rand.Shuffle(len(disks), func(i, j int) {
        disks[i], disks[j] = disks[j], disks[i]
    })
    fallbackDisks = disks[askDisks:]
    disks = disks[:askDisks]
}

// How to resolve results.
resolver := metadataResolutionParams{
    dirQuorum: listingQuorum,
    objQuorum: listingQuorum,
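Two things happen here: the listing quorum is set to a majority of the disks asked, (askDisks + 1) / 2 being ceil(askDisks/2), and the disk slice is shuffled before being split into an ask set and a fallback set so load spreads evenly. A runnable sketch using only the standard library:

package main

import (
    "fmt"
    "math/rand"
)

func main() {
    disks := []string{"d1", "d2", "d3", "d4", "d5", "d6"}
    askDisks := 4

    // Majority quorum over the disks we ask: (askDisks+1)/2 is ceil(askDisks/2).
    listingQuorum := (askDisks + 1) / 2

    var fallbackDisks []string
    if askDisks > 0 && len(disks) > askDisks {
        // Shuffle so load spreads across the set, then split.
        rand.Shuffle(len(disks), func(i, j int) {
            disks[i], disks[j] = disks[j], disks[i]
        })
        fallbackDisks = disks[askDisks:]
        disks = disks[:askDisks]
    }

    fmt.Println("quorum:", listingQuorum) // 2 of the 4 asked disks must agree
    fmt.Println("ask:", disks)
    fmt.Println("fallback:", fallbackDisks)
}

With askDisks = 4 over six disks, two of the four asked must agree; the remaining two disks are kept as fallbacks in case an asked disk fails mid-listing.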
cmd/erasure-decode_test.go
}
disks := setup.disks
erasure, err := NewErasure(context.Background(), data, parity, blockSizeV2)
if err != nil {
    b.Fatalf("failed to create ErasureStorage: %v", err)
}
writers := make([]io.Writer, len(disks))
for i, disk := range disks {
    if disk == nil {
        continue
    }
cmd/erasure-heal_test.go
}
readers := make([]io.ReaderAt, len(disks))
for i, disk := range disks {
    shardFilesize := erasure.ShardFileSize(test.size)
    readers[i] = newBitrotReader(disk, nil, "testbucket", "testobject", shardFilesize,
        test.algorithm, bitrotWriterSum(writers[i]), erasure.ShardSize())
}

// setup stale disks for the test case
staleDisks := make([]StorageAPI, len(disks))
copy(staleDisks, disks)
for j := range staleDisks {