Results 381 - 390 of 438 for Tools (0.5 sec)

  1. docs/distributed/DECOMMISSION.md

    # Decommissioning
    
    Decommissioning is a mechanism in MinIO to drain older pools (usually with old hardware) and migrate the content from such pools to newer pools (usually with better hardware). Decommissioning spreads the data across all remaining pools - for example, if you decommission `pool1`, all the data from `pool1` spreads across `pool2` and `pool3`.
    
    ## Features
    
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Mon Jul 11 14:59:49 UTC 2022
    - 8.3K bytes
    - Viewed (0)
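
    The excerpt above describes drained data being spread across the remaining pools rather than moved to a single target. The following is only a toy Go sketch of that re-placement idea (a plain hash over the surviving pools); it is not MinIO's decommission implementation, and the pool names are hypothetical.

    ```go
    // Toy illustration of the idea in DECOMMISSION.md: objects drained from a
    // decommissioned pool are spread across the remaining pools. This is NOT
    // MinIO's placement logic; it is a plain hash over hypothetical pool names.
    package main

    import (
    	"fmt"
    	"hash/fnv"
    )

    // pickPool deterministically assigns an object to one of the remaining pools.
    func pickPool(object string, pools []string) string {
    	h := fnv.New32a()
    	h.Write([]byte(object))
    	return pools[h.Sum32()%uint32(len(pools))]
    }

    func main() {
    	remaining := []string{"pool2", "pool3"} // pool1 is the pool being drained
    	for _, obj := range []string{"a.txt", "b.txt", "c.txt"} {
    		fmt.Printf("%s -> %s\n", obj, pickPool(obj, remaining))
    	}
    }
    ```
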
  2. cmd/erasure-server-pool-decom_gen.go

    			if err != nil {
    				err = msgp.WrapError(err, "Pools")
    				return
    			}
    			if cap(z.Pools) >= int(zb0002) {
    				z.Pools = (z.Pools)[:zb0002]
    			} else {
    				z.Pools = make([]PoolStatus, zb0002)
    			}
    			for za0001 := range z.Pools {
    				err = z.Pools[za0001].DecodeMsg(dc)
    				if err != nil {
    					err = msgp.WrapError(err, "Pools", za0001)
    					return
    				}
    			}
    		default:
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Mon Jul 04 21:02:54 UTC 2022
    - 26.7K bytes
    - Viewed (0)
  3. cmd/erasure-server-pool-decom.go

    		p.Pools[idx].LastUpdate = UTCNow()
    		p.Pools[idx].Decommission.StartTime = time.Time{}
    		p.Pools[idx].Decommission.Complete = false
    		p.Pools[idx].Decommission.Failed = true
    		p.Pools[idx].Decommission.Canceled = false
    		return true
    	}
    	return false
    }
    
    func (p *poolMeta) DecommissionCancel(idx int) bool {
    	if p.Pools[idx].Decommission != nil && !p.Pools[idx].Decommission.Canceled {
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Fri Aug 29 02:39:48 UTC 2025
    - 42.1K bytes
    - Viewed (1)
  4. cmd/rebalance-admin.go

    type rebalanceAdminStatus struct {
    	ID        string                // identifies the ongoing rebalance operation by a uuid
    	Pools     []rebalancePoolStatus `json:"pools"` // contains all pools, including inactive
    	StoppedAt time.Time             `json:"stoppedAt"`
    }
    
    func rebalanceStatus(ctx context.Context, z *erasureServerPools) (r rebalanceAdminStatus, err error) {
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Fri Aug 29 02:39:48 UTC 2025
    - 3.8K bytes
    - Viewed (0)
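
    A detail worth noting in the struct above: only `Pools` and `StoppedAt` carry `json` tags, so `ID` is marshalled under its Go field name. The sketch below uses simplified stand-in types (not the repository's) to show the resulting JSON shape.

    ```go
    // Standalone sketch with simplified stand-in types (not the repository's),
    // showing how the tags in the excerpt shape the JSON: ID has no tag, so it
    // marshals as "ID", while the other fields use their tag names.
    package main

    import (
    	"encoding/json"
    	"fmt"
    	"time"
    )

    type poolStatus struct {
    	ID int `json:"id"`
    }

    type adminStatus struct {
    	ID        string       // no json tag: serialized as "ID"
    	Pools     []poolStatus `json:"pools"`
    	StoppedAt time.Time    `json:"stoppedAt"`
    }

    func main() {
    	b, _ := json.Marshal(adminStatus{ID: "demo-id", Pools: []poolStatus{{ID: 0}}})
    	fmt.Println(string(b)) // {"ID":"demo-id","pools":[{"id":0}],"stoppedAt":"0001-01-01T00:00:00Z"}
    }
    ```
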
  5. cmd/server-main_test.go

    				t.Error("expected success, got failure", err)
    			}
    			if err == nil {
    				if len(sctx.Layout.pools) != 2 {
    					t.Error("expected parsed pools to be 2, not", len(sctx.Layout.pools))
    				}
    				if sctx.Layout.pools[0].cmdline != testcase.hash {
    					t.Error("expected hash", testcase.hash, "got", sctx.Layout.pools[0].cmdline)
    				}
    			}
    		})
    	}
    }
    
    // Tests initializing new object layer.
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Fri Aug 29 02:39:48 UTC 2025
    - 3.1K bytes
    - Viewed (0)
  6. docs/distributed/CONFIG.md

        address: ":8022"
        ssh-private-key: "/home/user/.ssh/id_rsa"
    ```
    
    If you are using the config `v1` YAML, you should migrate your `pools:` field values to the following format
    
    `v1` format
    ```yaml
    pools: # Specify the nodes and drives with pools
      -
        - "https://server-example-pool1:9000/mnt/disk{1...4}/"
        - "https://server{1...2}-pool1:9000/mnt/disk{1...4}/"
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Tue Jun 25 02:30:18 UTC 2024
    - 4.2K bytes
    - Viewed (0)
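
    The `v1` format shown above is simply a list of endpoint-string lists. As a rough illustration of that shape (not code from the repository, and using gopkg.in/yaml.v3 only for the example), it can be decoded like this:

    ```go
    // Minimal sketch: read the v1 `pools:` shape from the excerpt as a list of
    // string lists. gopkg.in/yaml.v3 is used purely for illustration; it is not
    // necessarily the library the server itself uses.
    package main

    import (
    	"fmt"

    	"gopkg.in/yaml.v3"
    )

    type v1Config struct {
    	Pools [][]string `yaml:"pools"`
    }

    func main() {
    	doc := []byte("pools:\n" +
    		"  -\n" +
    		"    - \"https://server-example-pool1:9000/mnt/disk{1...4}/\"\n" +
    		"    - \"https://server{1...2}-pool1:9000/mnt/disk{1...4}/\"\n")

    	var cfg v1Config
    	if err := yaml.Unmarshal(doc, &cfg); err != nil {
    		panic(err)
    	}
    	fmt.Println(len(cfg.Pools), "pool(s),", len(cfg.Pools[0]), "endpoint args in the first")
    }
    ```
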
  7. cmd/peer-s3-client.go

    	GetPools() []int
    }
    
    type localPeerS3Client struct {
    	node  Node
    	pools []int
    }
    
    func (l *localPeerS3Client) GetHost() string {
    	return l.node.Host
    }
    
    func (l *localPeerS3Client) SetPools(p []int) {
    	l.pools = make([]int, len(p))
    	copy(l.pools, p)
    }
    
    func (l localPeerS3Client) GetPools() []int {
    	return l.pools
    }
    
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Fri Aug 29 02:39:48 UTC 2025
    - 15.6K bytes
    - Viewed (0)
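
    `SetPools` in the excerpt copies its argument instead of retaining the caller's slice. A small standalone sketch (simplified types, not the repository's) of why that defensive copy matters:

    ```go
    // Why SetPools copies its argument: with a defensive copy, later mutation of
    // the caller's slice cannot change what the client stored.
    package main

    import "fmt"

    type client struct{ pools []int }

    func (c *client) SetPools(p []int) {
    	c.pools = make([]int, len(p))
    	copy(c.pools, p)
    }

    func main() {
    	p := []int{0, 1}
    	var c client
    	c.SetPools(p)
    	p[0] = 99               // caller mutates its own slice afterwards
    	fmt.Println(c.pools)    // [0 1] - the stored copy is unaffected
    }
    ```
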
  8. cmd/testdata/config/2.yaml

    version: v1
    address: ':9000'
    console-address: ':9001'
    certs-dir: '/home/user/.minio/certs/'
    pools: # Specify the nodes and drives with pools
      -
            - 'https://server-example-pool1:9000/mnt/disk{1...4}/'
            - 'https://server1-pool1:9000/mnt/disk{1...4}/'
            - 'https://server3-pool1:9000/mnt/disk{1...4}/'
            - 'https://server4-pool1:9000/mnt/disk{1...4}/'
      -
            - 'https://server-example-pool2:9000/mnt/disk{1...4}/'
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Thu Dec 07 09:33:56 UTC 2023
    - 869 bytes
    - Viewed (0)
  9. cmd/endpoint-ellipses.go

    	setDriveCount uint64
    }
    
    // buildDisksLayoutFromConfFile supports pool arguments with and without ellipses transparently.
    func buildDisksLayoutFromConfFile(pools []poolArgs) (layout disksLayout, err error) {
    	if len(pools) == 0 {
    		return layout, errInvalidArgument
    	}
    
    	for _, list := range pools {
    		var endpointsList endpointsList
    
    		for _, arg := range list.args {
    			switch {
    			case ellipses.HasList(arg):
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Fri Aug 29 02:39:48 UTC 2025
    - 14.6K bytes
    - Viewed (0)
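
    The excerpt branches on `ellipses.HasList(arg)` while handling endpoint arguments such as `/mnt/disk{1...4}/`. Below is a toy expander for a single numeric `{a...b}` range, written only to illustrate the notation; the repository's `ellipses` package also handles lists, multiple ranges, and padding, which this sketch does not.

    ```go
    // Toy expander for a single numeric {a...b} range, e.g. "/mnt/disk{1...4}/"
    // -> /mnt/disk1/ ... /mnt/disk4/. Illustration only; not the repository's
    // ellipses package.
    package main

    import (
    	"fmt"
    	"regexp"
    	"strconv"
    )

    var rangeRe = regexp.MustCompile(`\{(\d+)\.\.\.(\d+)\}`)

    // expand replaces the first {a...b} range in arg with each value in [a, b].
    func expand(arg string) []string {
    	m := rangeRe.FindStringSubmatchIndex(arg)
    	if m == nil {
    		return []string{arg}
    	}
    	lo, _ := strconv.Atoi(arg[m[2]:m[3]])
    	hi, _ := strconv.Atoi(arg[m[4]:m[5]])
    	var out []string
    	for i := lo; i <= hi; i++ {
    		out = append(out, arg[:m[0]]+strconv.Itoa(i)+arg[m[1]:])
    	}
    	return out
    }

    func main() {
    	fmt.Println(expand("/mnt/disk{1...4}/"))
    }
    ```
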
  10. cmd/testdata/config/invalid.yaml

    version:
    address: ':9000'
    console-address: ':9001'
    certs-dir: '/home/user/.minio/certs/'
    pools: # Specify the nodes and drives with pools
      -
            - 'https://server-example-pool1:9000/mnt/disk{1...4}/'
            - 'https://server1-pool1:9000/mnt/disk{1...4}/'
            - 'https://server3-pool1:9000/mnt/disk{1...4}/'
            - 'https://server4-pool1:9000/mnt/disk{1...4}/'
      -
            - 'https://server-example-pool2:9000/mnt/disk{1...4}/'
    Registered: Sun Sep 07 19:28:11 UTC 2025
    - Last Modified: Thu Dec 07 09:33:56 UTC 2023
    - 866 bytes
    - Viewed (0)