Results 1 - 10 of 152 for poolEps (0.12 sec)
cmd/storage-rest_test.go
endpoint.PoolIdx = 0
endpoint.SetIdx = 0
endpoint.DiskIdx = 0
poolEps := []PoolEndpoints{{
    Endpoints: Endpoints{endpoint},
}}
poolEps[0].SetCount = 1
poolEps[0].DrivesPerSet = 1

// Register handlers on newly created servers
registerStorageRESTHandlers(tg.Mux[0], poolEps, tg.Managers[0])
registerStorageRESTHandlers(tg.Mux[1], poolEps, tg.Managers[1])

storage := globalLocalSetDrives[0][0][0]
Last Modified: Mon Jul 22 07:04:48 UTC 2024 - 11.5K bytes
cmd/admin-handlers-pools.go
}
vars := mux.Vars(r)
v := vars["pool"]
byID := vars["by-id"] == "true"
pools := strings.Split(v, ",")
poolIndices := make([]int, 0, len(pools))
for _, pool := range pools {
    var idx int
    if byID {
        var err error
        idx, err = strconv.Atoi(pool)
        if err != nil {
            // We didn't find any matching pools, invalid input
            writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errInvalidArgument), r.URL)
Last Modified: Fri Jun 28 00:22:30 UTC 2024 - 10.9K bytes
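The handler above turns the comma-separated `pool` argument into pool indices, reading each entry as an integer when `by-id` is true. Below is a self-contained sketch of that parsing logic, assuming a hypothetical `known` slice in place of the server's real pool list:

```go
package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parsePoolIndices resolves a comma-separated pool specification into
// indices, numerically when byID is set, otherwise by exact match
// against the known pool command lines.
func parsePoolIndices(v string, byID bool, known []string) ([]int, error) {
    pools := strings.Split(v, ",")
    poolIndices := make([]int, 0, len(pools))
    for _, pool := range pools {
        idx := -1
        if byID {
            i, err := strconv.Atoi(pool)
            if err != nil || i < 0 || i >= len(known) {
                return nil, fmt.Errorf("invalid pool id %q", pool)
            }
            idx = i
        } else {
            for i, k := range known {
                if k == pool {
                    idx = i
                    break
                }
            }
            if idx < 0 {
                return nil, fmt.Errorf("unknown pool %q", pool)
            }
        }
        poolIndices = append(poolIndices, idx)
    }
    return poolIndices, nil
}

func main() {
    idx, err := parsePoolIndices("0,1", true, []string{"poolA", "poolB"})
    fmt.Println(idx, err)
}
```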
cmd/rebalance-admin.go
}
if !ps.Participating {
    continue
}
// For participating pools, the total bytes to be rebalanced by this pool
// is given by:
//   pf_c = (f_i + x)/c_i
// where pf_c is the percentage free space across pools, f_i is the ith
// pool's free space, and c_i is the ith pool's capacity,
// i.e. x = c_i*pf_c - f_i
totalBytesToRebal := float64(ps.InitCapacity)*meta.PercentFreeGoal - float64(ps.InitFreeSpace)
elapsed := time.Since(ps.Info.StartTime)
Last Modified: Fri Dec 22 00:56:43 UTC 2023 - 3.8K bytes
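The comment in this snippet derives the rebalance target as x = c_i*pf_c - f_i. A quick worked example with hypothetical numbers (a 100 GiB pool with 30 GiB free and a 40% free-space goal) shows the arithmetic:

```go
package main

import "fmt"

func main() {
    // Hypothetical numbers: a pool with 100 GiB capacity and 30 GiB free,
    // and a cluster-wide free-space goal of 40%.
    initCapacity := float64(100 << 30) // c_i
    initFreeSpace := float64(30 << 30) // f_i
    percentFreeGoal := 0.40            // pf_c

    // x = c_i*pf_c - f_i: bytes this pool must shed to reach the goal.
    // Here: 100 GiB * 0.40 - 30 GiB = 10 GiB.
    totalBytesToRebal := initCapacity*percentFreeGoal - initFreeSpace
    fmt.Printf("bytes to rebalance: %.0f (%.1f GiB)\n",
        totalBytesToRebal, totalBytesToRebal/(1<<30))
}
```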
.github/workflows/mint/minio-pools.yaml
Harshavardhana <******@****.***> 1699046298 -0700
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Nov 03 21:18:18 UTC 2023 - 2.3K bytes - Viewed (0) -
docs/distributed/DESIGN.md
Refer to the sizing guide with details on the default parity count chosen for different erasure stripe sizes [here](https://github.com/minio/minio/blob/master/docs/distributed/SIZING.md). MinIO places new objects in server pools based on proportionate free space, per pool. The following pseudo code demonstrates this behavior.

```go
func getAvailablePoolIdx(ctx context.Context) int {
```
Last Modified: Tue Aug 15 23:04:20 UTC 2023 - 8K bytes
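The body of `getAvailablePoolIdx` is cut off in the result above. As a rough sketch of placement weighted by proportionate free space - an illustration under assumed types, not MinIO's actual implementation - one could write:

```go
package main

import (
    "fmt"
    "math/rand"
)

// poolStat is a hypothetical stand-in for per-pool usage info.
type poolStat struct{ free uint64 }

// getAvailablePoolIdx picks a pool with probability proportional to its
// free space: a random point in [0, total free) is mapped onto the
// cumulative free-space ranges of the pools.
func getAvailablePoolIdx(pools []poolStat) int {
    var total uint64
    for _, p := range pools {
        total += p.free
    }
    if total == 0 {
        return -1 // no usable pool
    }
    choice := rand.Uint64() % total
    var cum uint64
    for i, p := range pools {
        cum += p.free
        if choice < cum {
            return i
        }
    }
    return len(pools) - 1
}

func main() {
    // Pool 1 has 3x the free space of pool 0, so it should be chosen
    // roughly three times as often.
    pools := []poolStat{{free: 100 << 30}, {free: 300 << 30}}
    fmt.Println("chosen pool:", getAvailablePoolIdx(pools))
}
```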
docs/distributed/DECOMMISSION.md
# Decommissioning

Decommissioning is a mechanism in MinIO to drain older pools (usually with older hardware) and migrate the content from such pools to newer pools (usually with better hardware). Decommissioning spreads the data across all remaining pools - for example, if you decommission `pool1`, all the data from `pool1` spreads across `pool2` and `pool3`.

## Features
Last Modified: Mon Jul 11 14:59:49 UTC 2022 - 8.3K bytes
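For orientation, the workflow described in DECOMMISSION.md is driven from the `mc admin decommission` subcommands; the alias and pool arguments below are placeholders, not values from this document:

```sh
mc admin decommission start alias/ http://minio{1...2}/data{1...4}
mc admin decommission status alias/
```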
cmd/erasure-server-pool-decom_test.go
}
var nmeta1 poolMeta
nmeta1.Version = poolMetaVersion
nmeta1.Pools = append(nmeta1.Pools, meta.Pools...)
for i, pool := range nmeta1.Pools {
    if i == 0 {
        nmeta1.Pools[i] = PoolStatus{
            CmdLine:    pool.CmdLine,
            ID:         i,
            LastUpdate: UTCNow(),
            Decommission: &PoolDecommissionInfo{
                Complete: true,
            },
        }
    }
}
var nmeta2 poolMeta
nmeta2.Version = poolMetaVersion
Last Modified: Mon Jul 01 14:38:46 UTC 2024 - 4.8K bytes
cmd/erasure-server-pool.go
})
if err != nil {
    return nil, err
}
if deploymentID == "" {
    // all pools should have same deployment ID
    deploymentID = formats[i].ID
}
// Validate if users brought different DeploymentID pools.
if deploymentID != formats[i].ID {
    return nil, fmt.Errorf("all pools must have same deployment ID - expected %s, got %s for pool(%s)",
        deploymentID, formats[i].ID, humanize.Ordinal(i+1))
}
Last Modified: Sun Sep 29 22:40:36 UTC 2024 - 89.8K bytes
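The check above pins the deployment ID to the first pool's format and rejects any later mismatch. A self-contained sketch of the same validation idea, with a plain string slice standing in for MinIO's actual `formats` type:

```go
package main

import (
    "fmt"

    "github.com/dustin/go-humanize"
)

// validateDeploymentIDs ensures every pool carries the same deployment ID,
// using the first pool's ID as the expected value.
func validateDeploymentIDs(ids []string) error {
    deploymentID := ""
    for i, id := range ids {
        if deploymentID == "" {
            deploymentID = id // first pool fixes the expected ID
        }
        if id != deploymentID {
            return fmt.Errorf("all pools must have same deployment ID - expected %s, got %s for pool(%s)",
                deploymentID, id, humanize.Ordinal(i+1))
        }
    }
    return nil
}

func main() {
    // The third pool disagrees, so this reports an error for the 3rd pool.
    fmt.Println(validateDeploymentIDs([]string{"a1", "a1", "b2"}))
}
```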
cmd/erasure-server-pool-decom_gen.go
    if err != nil {
        err = msgp.WrapError(err, "Pools")
        return
    }
    if cap(z.Pools) >= int(zb0002) {
        z.Pools = (z.Pools)[:zb0002]
    } else {
        z.Pools = make([]PoolStatus, zb0002)
    }
    for za0001 := range z.Pools {
        err = z.Pools[za0001].DecodeMsg(dc)
        if err != nil {
            err = msgp.WrapError(err, "Pools", za0001)
            return
        }
    }
default:
Last Modified: Mon Jul 04 21:02:54 UTC 2022 - 26.7K bytes
cmd/erasure-server-pool-decom.go
        p.Pools[idx].LastUpdate = UTCNow()
        p.Pools[idx].Decommission.StartTime = time.Time{}
        p.Pools[idx].Decommission.Complete = false
        p.Pools[idx].Decommission.Failed = true
        p.Pools[idx].Decommission.Canceled = false
        return true
    }
    return false
}

func (p *poolMeta) DecommissionCancel(idx int) bool {
    if p.Pools[idx].Decommission != nil && !p.Pools[idx].Decommission.Canceled {
Last Modified: Fri Sep 06 13:20:19 UTC 2024 - 42.2K bytes
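The snippet above rewrites all of the `Complete`/`Failed`/`Canceled` flags on every transition so that exactly one terminal state holds at a time. A minimal sketch of that pattern, with hypothetical types standing in for `poolMeta`:

```go
package main

import (
    "fmt"
    "time"
)

// decomInfo is a hypothetical stand-in for PoolDecommissionInfo: the three
// terminal flags are meant to be mutually exclusive.
type decomInfo struct {
    StartTime time.Time
    Complete  bool
    Failed    bool
    Canceled  bool
}

// markFailed resets the record so that exactly one terminal flag is set.
func markFailed(d *decomInfo) {
    d.StartTime = time.Time{} // clear the start time; the run is over
    d.Complete = false
    d.Failed = true
    d.Canceled = false
}

func main() {
    d := &decomInfo{StartTime: time.Now()}
    markFailed(d)
    fmt.Printf("%+v\n", *d)
}
```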