Results 1 - 10 of 480 for pools (0.02 sec)
- cmd/admin-handlers-pools.go
		return
	}
	vars := mux.Vars(r)
	v := vars["pool"]
	byID := vars["by-id"] == "true"
	pools := strings.Split(v, ",")
	poolIndices := make([]int, 0, len(pools))
	for _, pool := range pools {
		var idx int
		if byID {
			var err error
			idx, err = strconv.Atoi(pool)
			if err != nil {
				// We didn't find any matching pools, invalid input
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 11.1K bytes - Viewed (0)
- cmd/erasure-server-pool-decom_test.go
	}
	var nmeta1 poolMeta
	nmeta1.Version = poolMetaVersion
	nmeta1.Pools = append(nmeta1.Pools, meta.Pools...)
	for i, pool := range nmeta1.Pools {
		if i == 0 {
			nmeta1.Pools[i] = PoolStatus{
				CmdLine:    pool.CmdLine,
				ID:         i,
				LastUpdate: UTCNow(),
				Decommission: &PoolDecommissionInfo{
					Complete: true,
				},
			}
		}
	}
	var nmeta2 poolMeta
	nmeta2.Version = poolMetaVersion
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 4.8K bytes - Viewed (0)
- cmd/erasure-server-pool-decom.go
		p.Pools[idx].LastUpdate = UTCNow()
		p.Pools[idx].Decommission.Complete = true
		p.Pools[idx].Decommission.Failed = false
		p.Pools[idx].Decommission.Canceled = false
		return true
	}
	return false
}

func (p *poolMeta) DecommissionFailed(idx int) bool {
	if p.Pools[idx].Decommission != nil && !p.Pools[idx].Decommission.Failed {
		p.Pools[idx].LastUpdate = UTCNow()
		p.Pools[idx].Decommission.StartTime = time.Time{}
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 42.2K bytes - Viewed (1)
- cmd/rebalance-admin.go
		}
		if !ps.Participating {
			continue
		}
		// for participating pools, total bytes to be rebalanced by this pool is given by,
		// pf_c = (f_i + x)/c_i,
		// pf_c - percentage free space across pools, f_i - ith pool's free space, c_i - ith pool's capacity
		// i.e. x = c_i*pf_c - f_i
		totalBytesToRebal := float64(ps.InitCapacity)*meta.PercentFreeGoal - float64(ps.InitFreeSpace)
		elapsed := time.Since(ps.Info.StartTime)
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 3.8K bytes - Viewed (0)
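The rebalance target in the comment above reduces to x = c_i*pf_c - f_i. A minimal standalone sketch of that arithmetic, using made-up capacity and free-space figures rather than values from the MinIO code, is:

```go
package main

import "fmt"

func main() {
	// Hypothetical figures for one participating pool (not taken from the MinIO source):
	// c_i = 100 TiB capacity, f_i = 20 TiB free, pf_c = 0.30 cluster-wide free-space goal.
	initCapacity := float64(100 << 40) // c_i
	initFreeSpace := float64(20 << 40) // f_i
	percentFreeGoal := 0.30            // pf_c

	// x = c_i*pf_c - f_i: bytes this pool must shed so its free space reaches the goal.
	totalBytesToRebal := initCapacity*percentFreeGoal - initFreeSpace
	fmt.Printf("bytes to rebalance: %.0f (~%.1f TiB)\n", totalBytesToRebal, totalBytesToRebal/(1<<40))
}
```

With these numbers the pool must shed roughly 10 TiB to hit the 30% free-space goal.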
- cmd/erasure-server-pool-decom_gen.go
			if err != nil {
				err = msgp.WrapError(err, "Pools")
				return
			}
			if cap(z.Pools) >= int(zb0002) {
				z.Pools = (z.Pools)[:zb0002]
			} else {
				z.Pools = make([]PoolStatus, zb0002)
			}
			for za0001 := range z.Pools {
				err = z.Pools[za0001].DecodeMsg(dc)
				if err != nil {
					err = msgp.WrapError(err, "Pools", za0001)
					return
				}
			}
		default:
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 26.7K bytes - Viewed (0)
- cmd/peer-s3-client.go
	GetPools() []int
}

type localPeerS3Client struct {
	node  Node
	pools []int
}

func (l *localPeerS3Client) GetHost() string {
	return l.node.Host
}

func (l *localPeerS3Client) SetPools(p []int) {
	l.pools = make([]int, len(p))
	copy(l.pools, p)
}

func (l localPeerS3Client) GetPools() []int {
	return l.pools
}
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 15.6K bytes - Viewed (0)
- cmd/fmt-gen.go
	if err != nil {
		log.Fatalln(err)
	}
	zipFile, err := os.Create("format.json.zip")
	if err != nil {
		log.Fatalf("failed to create format.json.zip: %v", err)
	}
	defer zipFile.Close()
	fmtZipW := zip.NewWriter(zipFile)
	defer fmtZipW.Close()
	for _, pool := range pools { // for each pool
		setCount, setDriveCount := pool.SetCount, pool.DrivesPerSet
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 3.7K bytes - Viewed (0)
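The excerpt above stops before any entries are written into the archive. As a rough sketch of the archive/zip usage it sets up, the following standalone program writes one made-up format.json per pool; the entry names and payloads are illustrative and not what fmt-gen actually emits:

```go
package main

import (
	"archive/zip"
	"log"
	"os"
)

func main() {
	zipFile, err := os.Create("format.json.zip")
	if err != nil {
		log.Fatalf("failed to create format.json.zip: %v", err)
	}
	defer zipFile.Close()

	zw := zip.NewWriter(zipFile)
	defer zw.Close()

	// Hypothetical per-pool payloads; the real tool derives these from each
	// pool's set count and drives per set.
	entries := map[string][]byte{
		"pool-0/format.json": []byte(`{"version":"1"}`),
		"pool-1/format.json": []byte(`{"version":"1"}`),
	}
	for name, data := range entries {
		w, err := zw.Create(name) // add one file entry to the archive
		if err != nil {
			log.Fatalln(err)
		}
		if _, err := w.Write(data); err != nil {
			log.Fatalln(err)
		}
	}
}
```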
- docs/distributed/DESIGN.md
- MinIO also supports expansion of existing clusters in server pools. Each pool is a self-contained entity with the same SLA (read/write quorum) for each object as the original cluster. By using the existing namespace for lookup validation, MinIO ensures conflicting objects are not created. When no such object exists, MinIO simply places new objects in the least used pool.
### There are no limits on how many server pools can be combined
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Wed Feb 26 09:25:50 UTC 2025 - 8K bytes - Viewed (2)
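The "least used pool" placement described in the excerpt can be pictured with a small sketch. The PoolUsage type and the greedy selection below are illustrative assumptions for this listing, not MinIO's actual object-placement code:

```go
package main

import "fmt"

// PoolUsage is a hypothetical summary of one server pool's space usage;
// it is not a type from the MinIO code base.
type PoolUsage struct {
	Index int
	Total uint64 // total capacity in bytes
	Free  uint64 // free space in bytes
}

// leastUsedPool picks the pool with the highest fraction of free space,
// mirroring the idea that new objects land in the least used pool.
func leastUsedPool(pools []PoolUsage) int {
	best, bestFree := -1, -1.0
	for _, p := range pools {
		if p.Total == 0 {
			continue
		}
		if free := float64(p.Free) / float64(p.Total); free > bestFree {
			best, bestFree = p.Index, free
		}
	}
	return best
}

func main() {
	pools := []PoolUsage{
		{Index: 0, Total: 100, Free: 10}, // 10% free
		{Index: 1, Total: 200, Free: 90}, // 45% free
	}
	fmt.Println("place new object in pool", leastUsedPool(pools)) // pool 1
}
```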
- cmd/endpoint-ellipses.go
	setDriveCount uint64
}

// buildDisksLayoutFromConfFile supports with and without ellipses transparently.
func buildDisksLayoutFromConfFile(pools []poolArgs) (layout disksLayout, err error) {
	if len(pools) == 0 {
		return layout, errInvalidArgument
	}
	for _, list := range pools {
		var endpointsList endpointsList
		for _, arg := range list.args {
			switch {
			case ellipses.HasList(arg):
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 14.7K bytes - Viewed (0)
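For context, the ellipses notation that buildDisksLayoutFromConfFile consumes looks like /mnt/drive{1...4}. The toy expander below only sketches that notation for a single numeric range; it is a standalone illustration and does not use or mirror the real ellipses package:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// expandEllipses expands one {start...end} range in an argument such as
// "/mnt/drive{1...4}". Illustrative helper only.
func expandEllipses(arg string) []string {
	re := regexp.MustCompile(`\{(\d+)\.\.\.(\d+)\}`)
	m := re.FindStringSubmatchIndex(arg)
	if m == nil {
		return []string{arg}
	}
	start, _ := strconv.Atoi(arg[m[2]:m[3]])
	end, _ := strconv.Atoi(arg[m[4]:m[5]])
	out := make([]string, 0, end-start+1)
	for i := start; i <= end; i++ {
		out = append(out, arg[:m[0]]+strconv.Itoa(i)+arg[m[1]:])
	}
	return out
}

func main() {
	fmt.Println(expandEllipses("/mnt/drive{1...4}"))
	// Output: [/mnt/drive1 /mnt/drive2 /mnt/drive3 /mnt/drive4]
}
```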
- cmd/server-main_test.go
t.Error("expected success, got failure", err) } if err == nil { if len(sctx.Layout.pools) != 2 { t.Error("expected parsed pools to be 2, not", len(sctx.Layout.pools)) } if sctx.Layout.pools[0].cmdline != testcase.hash { t.Error("expected hash", testcase.hash, "got", sctx.Layout.pools[0].cmdline) } } }) } } // Tests initializing new object layer.
Registered: Sun Dec 28 19:28:13 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 3.1K bytes - Viewed (0)