Results 1 - 10 of 31 for pools (0.02 sec)
cmd/admin-handlers-pools.go
```go
	}
	vars := mux.Vars(r)
	v := vars["pool"]
	byID := vars["by-id"] == "true"
	pools := strings.Split(v, ",")
	poolIndices := make([]int, 0, len(pools))
	for _, pool := range pools {
		var idx int
		if byID {
			var err error
			idx, err = strconv.Atoi(pool)
			if err != nil {
				// We didn't find any matching pools, invalid input
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
```
Last Modified: Thu Sep 04 20:47:24 UTC 2025 - 11K bytes
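The handler above splits a comma-separated `pool` parameter and, when `by-id=true`, parses each token as an integer pool index. A standalone sketch of that parsing pattern (the function name and error text are illustrative, not MinIO's):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePoolIndices mirrors the splitting/Atoi pattern in the handler:
// a comma-separated value, each token a pool index when byID is set.
func parsePoolIndices(v string, byID bool) ([]int, error) {
	pools := strings.Split(v, ",")
	indices := make([]int, 0, len(pools))
	for _, pool := range pools {
		if !byID {
			// In the real handler the token is matched against pool
			// command lines instead; elided here.
			continue
		}
		idx, err := strconv.Atoi(pool)
		if err != nil {
			return nil, fmt.Errorf("invalid pool index %q: %w", pool, err)
		}
		indices = append(indices, idx)
	}
	return indices, nil
}

func main() {
	idx, err := parsePoolIndices("0,2,3", true)
	fmt.Println(idx, err) // [0 2 3] <nil>
}
```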
cmd/rebalance-admin.go
```go
type rebalanceAdminStatus struct {
	ID        string                // identifies the ongoing rebalance operation by a uuid
	Pools     []rebalancePoolStatus `json:"pools"`     // contains all pools, including inactive
	StoppedAt time.Time             `json:"stoppedAt"`
}

func rebalanceStatus(ctx context.Context, z *erasureServerPools) (r rebalanceAdminStatus, err error) {
```
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 3.8K bytes
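Given the struct tags above, a status value marshals with `pools` and `stoppedAt` keys, while the untagged `ID` keeps its Go field name. A minimal sketch, with `rebalancePoolStatus` stubbed since only the shape matters here:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// poolStatus is a stand-in for rebalancePoolStatus.
type poolStatus struct {
	ID       int     `json:"id"`
	Progress float64 `json:"progress"`
}

type adminStatus struct {
	ID        string       // no json tag: marshals under "ID"
	Pools     []poolStatus `json:"pools"`
	StoppedAt time.Time    `json:"stoppedAt"`
}

func main() {
	s := adminStatus{
		ID:    "example-uuid",
		Pools: []poolStatus{{ID: 0, Progress: 42.5}},
	}
	b, _ := json.MarshalIndent(s, "", "  ")
	fmt.Println(string(b))
}
```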
cmd/erasure-server-pool-decom_test.go
```go
	}
	var nmeta1 poolMeta
	nmeta1.Version = poolMetaVersion
	nmeta1.Pools = append(nmeta1.Pools, meta.Pools...)
	for i, pool := range nmeta1.Pools {
		if i == 0 {
			nmeta1.Pools[i] = PoolStatus{
				CmdLine:    pool.CmdLine,
				ID:         i,
				LastUpdate: UTCNow(),
				Decommission: &PoolDecommissionInfo{
					Complete: true,
				},
			}
		}
	}
	var nmeta2 poolMeta
	nmeta2.Version = poolMetaVersion
```
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 4.8K bytes
cmd/erasure-server-pool-decom.go
```go
		p.Pools[idx].LastUpdate = UTCNow()
		p.Pools[idx].Decommission.StartTime = time.Time{}
		p.Pools[idx].Decommission.Complete = false
		p.Pools[idx].Decommission.Failed = true
		p.Pools[idx].Decommission.Canceled = false
		return true
	}
	return false
}

func (p *poolMeta) DecommissionCancel(idx int) bool {
	if p.Pools[idx].Decommission != nil && !p.Pools[idx].Decommission.Canceled {
```
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 42.1K bytes
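Each transition above stamps `LastUpdate` and sets exactly one of the `Complete`/`Failed`/`Canceled` flags. A self-contained sketch of that mutually exclusive flag pattern, with stand-in types (the guard in `markFailed` is an assumption for illustration):

```go
package main

import (
	"fmt"
	"time"
)

type decomInfo struct {
	StartTime time.Time
	Complete  bool
	Failed    bool
	Canceled  bool
}

type pool struct {
	LastUpdate   time.Time
	Decommission *decomInfo
}

// markFailed mirrors the DecommissionFailed-style transition: it only
// fires when a decommission exists and is not already marked failed,
// resets the start time, and makes Failed the single active flag.
func markFailed(p *pool) bool {
	if p.Decommission != nil && !p.Decommission.Failed {
		p.LastUpdate = time.Now().UTC()
		p.Decommission.StartTime = time.Time{}
		p.Decommission.Complete = false
		p.Decommission.Failed = true
		p.Decommission.Canceled = false
		return true
	}
	return false
}

func main() {
	p := &pool{Decommission: &decomInfo{StartTime: time.Now()}}
	fmt.Println(markFailed(p), p.Decommission.Failed) // true true
}
```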
cmd/server-main_test.go
t.Error("expected success, got failure", err) } if err == nil { if len(sctx.Layout.pools) != 2 { t.Error("expected parsed pools to be 2, not", len(sctx.Layout.pools)) } if sctx.Layout.pools[0].cmdline != testcase.hash { t.Error("expected hash", testcase.hash, "got", sctx.Layout.pools[0].cmdline) } } }) } } // Tests initializing new object layer.
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 3.1K bytes
cmd/peer-s3-client.go
```go
	GetPools() []int
}

type localPeerS3Client struct {
	node  Node
	pools []int
}

func (l *localPeerS3Client) GetHost() string {
	return l.node.Host
}

func (l *localPeerS3Client) SetPools(p []int) {
	l.pools = make([]int, len(p))
	copy(l.pools, p)
}

func (l localPeerS3Client) GetPools() []int {
	return l.pools
}
```
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 15.6K bytes
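Note that `SetPools` copies the slice instead of storing the caller's reference, so later mutation by the caller cannot alter the client's view. A small illustration of why the copy matters:

```go
package main

import "fmt"

type client struct{ pools []int }

// setPoolsAliased stores the caller's slice directly -- the aliasing
// bug that the copy in SetPools avoids.
func (c *client) setPoolsAliased(p []int) { c.pools = p }

// setPoolsCopied mirrors the defensive-copy pattern above.
func (c *client) setPoolsCopied(p []int) {
	c.pools = make([]int, len(p))
	copy(c.pools, p)
}

func main() {
	p := []int{0, 1}
	var a, b client
	a.setPoolsAliased(p)
	b.setPoolsCopied(p)
	p[0] = 99
	fmt.Println(a.pools, b.pools) // [99 1] [0 1]
}
```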
cmd/endpoint-ellipses.go
```go
	setDriveCount uint64
}

// buildDisksLayoutFromConfFile supports with and without ellipses transparently.
func buildDisksLayoutFromConfFile(pools []poolArgs) (layout disksLayout, err error) {
	if len(pools) == 0 {
		return layout, errInvalidArgument
	}
	for _, list := range pools {
		var endpointsList endpointsList
		for _, arg := range list.args {
			switch {
			case ellipses.HasList(arg):
```
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 14.6K bytes
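`buildDisksLayoutFromConfFile` branches on whether an argument contains an ellipses pattern such as `/data{1...4}`. As a rough illustration only, a simplified expander for a single numeric range (this is not the `ellipses` package's API):

```go
package main

import "fmt"

// expandRange expands a single "prefix{from...to}suffix"-style pattern.
// Real MinIO patterns are richer (lists, multiple ranges per argument);
// this only illustrates the idea of expansion into concrete endpoints.
func expandRange(prefix string, from, to int, suffix string) []string {
	out := make([]string, 0, to-from+1)
	for i := from; i <= to; i++ {
		out = append(out, fmt.Sprintf("%s%d%s", prefix, i, suffix))
	}
	return out
}

func main() {
	fmt.Println(expandRange("/data", 1, 4, ""))
	// [/data1 /data2 /data3 /data4]
}
```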
cmd/fmt-gen.go
```go
	deploymentID := ctxt.String("deployment-id")
	err := buildServerCtxt(ctxt, &globalServerCtxt)
	if err != nil {
		log.Fatalln(err)
	}
	handleCommonArgs(globalServerCtxt)
	pools, _, err := createServerEndpoints(globalMinioAddr, globalServerCtxt.Layout.pools, globalServerCtxt.Layout.legacy)
	if err != nil {
		log.Fatalln(err)
	}
	zipFile, err := os.Create("format.json.zip")
	if err != nil {
```
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 3.7K bytes
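`fmt-gen` writes the generated format metadata into `format.json.zip`. A minimal sketch of that output step using only the standard library (the entry name and JSON payload are illustrative):

```go
package main

import (
	"archive/zip"
	"log"
	"os"
)

func main() {
	zipFile, err := os.Create("format.json.zip")
	if err != nil {
		log.Fatalln(err)
	}
	defer zipFile.Close()

	// Stream one JSON entry into the archive.
	zw := zip.NewWriter(zipFile)
	w, err := zw.Create("pool-0/format.json") // entry name is illustrative
	if err != nil {
		log.Fatalln(err)
	}
	if _, err := w.Write([]byte(`{"version":"1"}`)); err != nil {
		log.Fatalln(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatalln(err)
	}
}
```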
cmd/erasure-server-pool.go
```go
	})
	if err != nil {
		return nil, err
	}
	if deploymentID == "" {
		// all pools should have same deployment ID
		deploymentID = formats[i].ID
	}
	// Validate if users brought different DeploymentID pools.
	if deploymentID != formats[i].ID {
		return nil, fmt.Errorf("all pools must have same deployment ID - expected %s, got %s for pool(%s)",
			deploymentID, formats[i].ID, humanize.Ordinal(i+1))
	}
```
Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 89.1K bytes
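The check above takes the first non-empty deployment ID as the reference and rejects any pool whose format disagrees. The same first-wins validation in isolation (names are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

// commonID returns the shared ID, or an error on the first mismatch,
// mirroring the first-wins validation in the snippet above.
func commonID(ids []string) (string, error) {
	ref := ""
	for i, id := range ids {
		if ref == "" {
			ref = id
		}
		if id != ref {
			return "", fmt.Errorf("all pools must have same deployment ID - expected %s, got %s for pool %d", ref, id, i+1)
		}
	}
	if ref == "" {
		return "", errors.New("no deployment ID found")
	}
	return ref, nil
}

func main() {
	fmt.Println(commonID([]string{"abc", "abc"})) // abc <nil>
	fmt.Println(commonID([]string{"abc", "xyz"})) // mismatch error
}
```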
docs/distributed/README.md
endlessly, so you can perpetually expand your clusters as needed. Restarts are immediate and non-disruptive to applications. Each group of servers in the command line is called a pool; there are two server pools in this example. New objects are placed in server pools in proportion to the amount of free space in each pool. Within each pool, the erasure set of drives holding an object is chosen by a deterministic hashing algorithm. > **NOTE:** Each pool you add must...
Last Modified: Tue Aug 12 18:20:36 UTC 2025 - 8.9K bytes
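The README states that new objects land in pools in proportion to free space. A toy sketch of selection under that rule (the weighting function is an assumption for illustration; actual placement also uses deterministic hashing to pick the erasure set within a pool):

```go
package main

import (
	"fmt"
	"math/rand"
)

// pickPool chooses a pool index with probability proportional to its
// free space, the placement rule the README describes.
func pickPool(freeBytes []uint64, rng *rand.Rand) int {
	var total uint64
	for _, f := range freeBytes {
		total += f
	}
	n := rng.Uint64() % total // assumes total > 0
	for i, f := range freeBytes {
		if n < f {
			return i
		}
		n -= f
	}
	return len(freeBytes) - 1
}

func main() {
	rng := rand.New(rand.NewSource(1))
	counts := make([]int, 2)
	// pool 0 has 3x the free space of pool 1
	for i := 0; i < 10000; i++ {
		counts[pickPool([]uint64{300, 100}, rng)]++
	}
	fmt.Println(counts) // roughly [7500 2500]
}
```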