Results 1 - 10 of 232 for "disks"

  1. cmd/testdata/config/invalid-disks.yaml

      -
            - 'https://server-example-pool1:9000/mnt/disk1/'
            - 'https://server1-pool1:9000/mnt/disk{1...4}/'
            - 'https://server3-pool1:9000/mnt/disk{1...4}/'
            - 'https://server4-pool1:9000/mnt/disk{1...4}/'
      -
            - 'https://server-example-pool2:9000/mnt/disk{1...4}/'
            - 'https://server1-pool2:9000/mnt/disk{1...4}/'
            - 'https://server3-pool2:9000/mnt/disk{1...4}/'
    Others
    - Last Modified: Thu Dec 07 09:33:56 GMT 2023
    - 863 bytes
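    Note: the '{1...4}' suffix in the endpoint URLs above is a numeric range
    pattern standing for four numbered mount paths (disk1 through disk4). The
    sketch below is a minimal, hypothetical illustration of expanding one such
    pattern; the regular expression and the expandRange helper are assumptions
    made here for clarity, not MinIO's actual endpoint parser.

    	// expand_range.go: illustrative expansion of a "{start...end}" range
    	// in an endpoint string. Handles a single range per endpoint.
    	package main

    	import (
    		"fmt"
    		"regexp"
    		"strconv"
    	)

    	var rangePattern = regexp.MustCompile(`\{(\d+)\.\.\.(\d+)\}`)

    	func expandRange(endpoint string) []string {
    		m := rangePattern.FindStringSubmatch(endpoint)
    		if m == nil {
    			return []string{endpoint}
    		}
    		start, _ := strconv.Atoi(m[1])
    		end, _ := strconv.Atoi(m[2])
    		var out []string
    		for i := start; i <= end; i++ {
    			out = append(out, rangePattern.ReplaceAllString(endpoint, strconv.Itoa(i)))
    		}
    		return out
    	}

    	func main() {
    		// Prints .../mnt/disk1/ through .../mnt/disk4/
    		for _, e := range expandRange("https://server1-pool1:9000/mnt/disk{1...4}/") {
    			fmt.Println(e)
    		}
    	}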
  2. cmd/erasure-heal_test.go

    		}
    
    		readers := make([]io.ReaderAt, len(disks))
    		for i, disk := range disks {
    			shardFilesize := erasure.ShardFileSize(test.size)
    			readers[i] = newBitrotReader(disk, nil, "testbucket", "testobject", shardFilesize, test.algorithm, bitrotWriterSum(writers[i]), erasure.ShardSize())
    		}
    
    		// setup stale disks for the test case
    		staleDisks := make([]StorageAPI, len(disks))
    		copy(staleDisks, disks)
    		for j := 0; j < len(staleDisks); j++ {
    Go
    - Last Modified: Tue Jan 30 20:43:25 GMT 2024
    - 7.9K bytes
  3. cmd/erasure-object.go

    }
    
    func readAllRawFileInfo(ctx context.Context, disks []StorageAPI, bucket, object string, readData bool) ([]RawFileInfo, []error) {
    	rawFileInfos := make([]RawFileInfo, len(disks))
    	g := errgroup.WithNErrs(len(disks))
    	for index := range disks {
    		index := index
    		g.Go(func() (err error) {
    			if disks[index] == nil {
    				return errDiskNotFound
    			}
    			rf, err := disks[index].ReadXL(ctx, bucket, object, readData)
    Go
    - Last Modified: Sun May 05 16:56:21 GMT 2024
    - 77.2K bytes
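    Note: readAllRawFileInfo above fans one ReadXL call out per disk and records
    a result and an error per disk index through MinIO's errgroup.WithNErrs
    helper. The sketch below shows the same per-index fan-out shape using only
    the standard library; the fanOut function and the string-typed disks slice
    are illustrative assumptions, not the real StorageAPI types.

    	// fanout.go: run one task per disk concurrently, collecting a result
    	// and an error per index, then wait for all of them.
    	package main

    	import (
    		"fmt"
    		"sync"
    	)

    	func fanOut(disks []string, read func(disk string) (string, error)) ([]string, []error) {
    		results := make([]string, len(disks))
    		errs := make([]error, len(disks))
    		var wg sync.WaitGroup
    		for i := range disks {
    			wg.Add(1)
    			go func(i int) {
    				defer wg.Done()
    				results[i], errs[i] = read(disks[i])
    			}(i)
    		}
    		wg.Wait()
    		return results, errs
    	}

    	func main() {
    		disks := []string{"/mnt/disk1", "/mnt/disk2", "/mnt/disk3"}
    		res, errs := fanOut(disks, func(d string) (string, error) {
    			return "xl.meta@" + d, nil
    		})
    		fmt.Println(res, errs)
    	}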
  4. cmd/erasure-common.go

    	// Based on the random shuffling return back randomized disks.
    	r := rand.New(rand.NewSource(time.Now().UnixNano()))
    
    	for _, i := range r.Perm(len(disks)) {
    		if disks[i] != nil && disks[i].IsLocal() {
    			newDisks = append(newDisks, disks[i])
    		}
    	}
    
    	return newDisks
    }
    
    func (er erasureObjects) getLocalDisks() (newDisks []StorageAPI) {
    	disks := er.getDisks()
    Go
    - Last Modified: Sun Apr 28 17:53:50 GMT 2024
    - 4.6K bytes
  5. cmd/erasure-metadata-utils_test.go

    	if shuffledDisks[0] != disks[8] ||
    		shuffledDisks[1] != disks[7] ||
    		shuffledDisks[2] != disks[9] ||
    		shuffledDisks[3] != disks[6] ||
    		shuffledDisks[4] != disks[10] ||
    		shuffledDisks[5] != disks[5] ||
    		shuffledDisks[6] != disks[11] ||
    		shuffledDisks[7] != disks[4] ||
    		shuffledDisks[8] != disks[12] ||
    		shuffledDisks[9] != disks[3] ||
    		shuffledDisks[10] != disks[13] ||
    		shuffledDisks[11] != disks[2] ||
    Go
    - Last Modified: Thu Jan 18 07:03:17 GMT 2024
    - 7.4K bytes
  6. cmd/format-erasure.go

    // relinquishes the underlying connection for all storage disks.
    func closeStorageDisks(storageDisks ...StorageAPI) {
    	var wg sync.WaitGroup
    	for _, disk := range storageDisks {
    		if disk == nil {
    			continue
    		}
    		wg.Add(1)
    		go func(disk StorageAPI) {
    			defer wg.Done()
    			disk.Close()
    		}(disk)
    	}
    	wg.Wait()
    }
    
    // Initialize storage disks for each endpoint.
    Go
    - Last Modified: Fri May 03 15:54:03 GMT 2024
    - 23.2K bytes
  7. cmd/erasure-object_test.go

    		}
    
    		// Step 3: Upload the object with some disks offline
    		sets.erasureDisksMu.Lock()
    		xl.getDisks = func() []StorageAPI {
    			disks := make([]StorageAPI, len(origErasureDisks))
    			copy(disks, origErasureDisks)
    			disks[0] = nil
    			disks[1] = nil
    			return disks
    		}
    		sets.erasureDisksMu.Unlock()
    Go
    - Last Modified: Tue Jan 30 20:43:25 GMT 2024
    - 36.8K bytes
  8. cmd/erasure.go

    		}
    	}()
    
    	// Restrict parallelism for disk usage scanner
    	// up to GOMAXPROCS if GOMAXPROCS is < len(disks)
    	maxProcs := runtime.GOMAXPROCS(0)
    	if maxProcs < len(disks) {
    		disks = disks[:maxProcs]
    	}
    
    	// Start one scanner per disk
    	var wg sync.WaitGroup
    	wg.Add(len(disks))
    
    	for i := range disks {
    		go func(i int) {
    			defer wg.Done()
    			disk := disks[i]
    
    			for bucket := range bucketCh {
    Go
    - Last Modified: Fri Apr 26 06:32:14 GMT 2024
    - 16K bytes
  9. cmd/metrics-realtime.go

    		m.ByDisk = make(map[string]madmin.DiskMetric)
    		aggr := madmin.DiskMetric{
    			CollectedAt: time.Now(),
    		}
    		for name, disk := range collectLocalDisksMetrics(opts.disks) {
    			m.ByDisk[name] = disk
    			aggr.Merge(&disk)
    		}
    		m.Aggregated.Disk = &aggr
    	}
    
    	if types.Contains(madmin.MetricsScanner) {
    		metrics := globalScannerMetrics.report()
    		m.Aggregated.Scanner = &metrics
    	}
    Go
    - Last Modified: Wed Apr 10 16:28:08 GMT 2024
    - 6.1K bytes
  10. cmd/erasure-decode_test.go

    	}
    	disks := setup.disks
    	erasure, err := NewErasure(context.Background(), data, parity, blockSizeV2)
    	if err != nil {
    		b.Fatalf("failed to create ErasureStorage: %v", err)
    	}
    
    	writers := make([]io.Writer, len(disks))
    	for i, disk := range disks {
    		if disk == nil {
    			continue
    		}
    Go
    - Last Modified: Tue Jan 30 20:43:25 GMT 2024
    - 21.1K bytes