Results 381 - 390 of 441 for Tools (0.05 sec)
- doc/go_spec.html (last modified Tue May 06 19:12:15 UTC 2025, 286.2K bytes)

…are different characters. Implementation restriction: For compatibility with other tools, a compiler may disallow the NUL character (U+0000) in the source text. Implementation restriction: For compatibility with other tools, a compiler may ignore a UTF-8-encoded byte order mark (U+FEFF) if it is the first Unicode code point in the source text.
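The restriction quoted above leaves BOM handling to the tool. As a minimal, hypothetical sketch (not from the spec or any particular compiler), a tool that wants to tolerate such files could strip one leading U+FEFF before further processing:

```go
package main

import (
	"fmt"
	"strings"
)

// stripBOM drops a single leading U+FEFF byte order mark, the latitude the
// spec grants compilers "for compatibility with other tools".
func stripBOM(src string) string {
	return strings.TrimPrefix(src, "\uFEFF")
}

func main() {
	src := "\uFEFFpackage main"
	fmt.Printf("%q\n", stripBOM(src)) // prints "package main"
}
```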
- cmd/erasure-server-pool-decom_test.go (last modified Fri Aug 29 02:39:48 UTC 2025, 4.8K bytes)

```go
}
var nmeta1 poolMeta
nmeta1.Version = poolMetaVersion
nmeta1.Pools = append(nmeta1.Pools, meta.Pools...)
for i, pool := range nmeta1.Pools {
	if i == 0 {
		nmeta1.Pools[i] = PoolStatus{
			CmdLine:    pool.CmdLine,
			ID:         i,
			LastUpdate: UTCNow(),
			Decommission: &PoolDecommissionInfo{
				Complete: true,
			},
		}
	}
}
var nmeta2 poolMeta
nmeta2.Version = poolMetaVersion
```
- docs/distributed/DECOMMISSION.md (last modified Mon Jul 11 14:59:49 UTC 2022, 8.3K bytes)

# Decommissioning

Decommissioning is a mechanism in MinIO to drain older pools (usually with older hardware) and migrate the content from such pools to newer pools (usually with better hardware). Decommissioning spreads the data across all remaining pools: for example, if you decommission `pool1`, all the data from `pool1` spreads across `pool2` and `pool3`.

## Features
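To make the spreading behaviour concrete, here is an illustrative Go sketch, not MinIO's actual placement logic (which uses hash-based erasure placement); `redistribute` and its round-robin choice are purely hypothetical:

```go
package main

import "fmt"

// redistribute is a toy stand-in for placement during decommissioning:
// objects drained from the removed pool end up spread across the pools
// that remain.
func redistribute(objects []string, remainingPools []string) map[string][]string {
	placement := make(map[string][]string)
	for i, obj := range objects {
		pool := remainingPools[i%len(remainingPools)] // round-robin stand-in; MinIO hashes object names
		placement[pool] = append(placement[pool], obj)
	}
	return placement
}

func main() {
	objs := []string{"a.txt", "b.txt", "c.txt", "d.txt"}
	fmt.Println(redistribute(objs, []string{"pool2", "pool3"}))
}
```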
- cmd/erasure-server-pool-decom.go (last modified Fri Aug 29 02:39:48 UTC 2025, 42.1K bytes)

```go
		p.Pools[idx].LastUpdate = UTCNow()
		p.Pools[idx].Decommission.StartTime = time.Time{}
		p.Pools[idx].Decommission.Complete = false
		p.Pools[idx].Decommission.Failed = true
		p.Pools[idx].Decommission.Canceled = false
		return true
	}
	return false
}

func (p *poolMeta) DecommissionCancel(idx int) bool {
	if p.Pools[idx].Decommission != nil && !p.Pools[idx].Decommission.Canceled {
```
- cmd/erasure-server-pool-decom_gen.go (last modified Mon Jul 04 21:02:54 UTC 2022, 26.7K bytes)

```go
		if err != nil {
			err = msgp.WrapError(err, "Pools")
			return
		}
		if cap(z.Pools) >= int(zb0002) {
			z.Pools = (z.Pools)[:zb0002]
		} else {
			z.Pools = make([]PoolStatus, zb0002)
		}
		for za0001 := range z.Pools {
			err = z.Pools[za0001].DecodeMsg(dc)
			if err != nil {
				err = msgp.WrapError(err, "Pools", za0001)
				return
			}
		}
	default:
```
- cmd/rebalance-admin.go (last modified Fri Aug 29 02:39:48 UTC 2025, 3.8K bytes)

```go
type rebalanceAdminStatus struct {
	ID        string                // identifies the ongoing rebalance operation by a uuid
	Pools     []rebalancePoolStatus `json:"pools"` // contains all pools, including inactive
	StoppedAt time.Time             `json:"stoppedAt"`
}

func rebalanceStatus(ctx context.Context, z *erasureServerPools) (r rebalanceAdminStatus, err error) {
```
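Assuming the excerpt's JSON tags behave as encoding/json normally does, a trimmed, self-contained stand-in (the `poolStatus` type here is hypothetical, standing in for `rebalancePoolStatus`) shows roughly how such a status would serialize:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// poolStatus is a hypothetical placeholder for rebalancePoolStatus.
type poolStatus struct {
	ID int `json:"id"`
}

type rebalanceAdminStatus struct {
	ID        string       // no json tag in the excerpt, so it marshals under the key "ID"
	Pools     []poolStatus `json:"pools"`
	StoppedAt time.Time    `json:"stoppedAt"`
}

func main() {
	s := rebalanceAdminStatus{ID: "uuid-1234", Pools: []poolStatus{{ID: 0}, {ID: 1}}}
	out, _ := json.MarshalIndent(s, "", "  ")
	// Prints an object with "ID", "pools", and "stoppedAt" keys; the zero
	// StoppedAt renders as "0001-01-01T00:00:00Z".
	fmt.Println(string(out))
}
```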
- docs/distributed/CONFIG.md (last modified Tue Jun 25 02:30:18 UTC 2024, 4.2K bytes)

```yaml
address: ":8022"
ssh-private-key: "/home/user/.ssh/id_rsa"
```

If you are using the config `v1` YAML, you should migrate your `pools:` field values to the following `v1` format:

```yaml
pools: # Specify the nodes and drives with pools
  -
    - "https://server-example-pool1:9000/mnt/disk{1...4}/"
    - "https://server{1...2}-pool1:9000/mnt/disk{1...4}/"
```
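The `pools:` value in this v1 layout is a list of pools, each itself a list of endpoint strings. A small sketch of reading that shape with gopkg.in/yaml.v3; the `serverConfig` type is hypothetical and models only the fields visible in the excerpt:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// serverConfig models just the fields shown in the excerpt above.
type serverConfig struct {
	Address       string     `yaml:"address"`
	SSHPrivateKey string     `yaml:"ssh-private-key"`
	Pools         [][]string `yaml:"pools"` // each inner list is one pool's endpoints
}

func main() {
	data := []byte(`
address: ":8022"
ssh-private-key: "/home/user/.ssh/id_rsa"
pools:
  -
    - "https://server-example-pool1:9000/mnt/disk{1...4}/"
    - "https://server{1...2}-pool1:9000/mnt/disk{1...4}/"
`)
	var cfg serverConfig
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(len(cfg.Pools), cfg.Pools[0][0]) // 1 https://server-example-pool1:9000/mnt/disk{1...4}/
}
```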
- cmd/server-main_test.go (last modified Fri Aug 29 02:39:48 UTC 2025, 3.1K bytes)

```go
				t.Error("expected success, got failure", err)
			}
			if err == nil {
				if len(sctx.Layout.pools) != 2 {
					t.Error("expected parsed pools to be 2, not", len(sctx.Layout.pools))
				}
				if sctx.Layout.pools[0].cmdline != testcase.hash {
					t.Error("expected hash", testcase.hash, "got", sctx.Layout.pools[0].cmdline)
				}
			}
		})
	}
}

// Tests initializing new object layer.
```
- cmd/peer-s3-client.go (last modified Fri Aug 29 02:39:48 UTC 2025, 15.6K bytes)

```go
	GetPools() []int
}

type localPeerS3Client struct {
	node  Node
	pools []int
}

func (l *localPeerS3Client) GetHost() string {
	return l.node.Host
}

func (l *localPeerS3Client) SetPools(p []int) {
	l.pools = make([]int, len(p))
	copy(l.pools, p)
}

func (l localPeerS3Client) GetPools() []int {
	return l.pools
}
```
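Note that `SetPools` copies the slice rather than retaining the caller's backing array. A stand-alone sketch (simplified `client` type, not the real peer client) of why that defensive copy matters:

```go
package main

import "fmt"

// client mimics the SetPools/GetPools pattern from the excerpt: the setter
// takes a defensive copy so later mutation by the caller cannot leak in.
type client struct {
	pools []int
}

func (c *client) SetPools(p []int) {
	c.pools = make([]int, len(p))
	copy(c.pools, p)
}

func (c *client) GetPools() []int { return c.pools }

func main() {
	p := []int{0, 1}
	c := &client{}
	c.SetPools(p)
	p[0] = 99                 // caller mutates its own slice afterwards
	fmt.Println(c.GetPools()) // still [0 1] thanks to the copy
}
```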
- cmd/testdata/config/2.yaml (last modified Thu Dec 07 09:33:56 UTC 2023, 869 bytes)

```yaml
version: v1
address: ':9000'
console-address: ':9001'
certs-dir: '/home/user/.minio/certs/'
pools: # Specify the nodes and drives with pools
  -
    - 'https://server-example-pool1:9000/mnt/disk{1...4}/'
    - 'https://server1-pool1:9000/mnt/disk{1...4}/'
    - 'https://server3-pool1:9000/mnt/disk{1...4}/'
    - 'https://server4-pool1:9000/mnt/disk{1...4}/'
  -
    - 'https://server-example-pool2:9000/mnt/disk{1...4}/'
```