Results 1 - 10 of 96 for poolId (0.03 sec)
docs/logging/README.md
"X-Amz-Request-Id": "17CDC1F4D7E69123", "X-Content-Type-Options": "nosniff", "X-Xss-Protection": "1; mode=block" }, "tags": { "objectLocation": { "name": "hosts", "poolId": 1, "setId": 1, "drives": [ "/mnt/data1", "/mnt/data2", "/mnt/data3", "/mnt/data4" ] } }, "accessKey": "minioadmin" } ```
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Tue Aug 12 18:20:36 UTC 2025 - 10.5K bytes - Viewed (0)
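The `objectLocation` tag ties an audited request to the pool, erasure set, and drives that hold the object. As a minimal sketch of consuming that tag (the `ObjectLocation` struct name and field set are assumptions mirroring the JSON above, not a MinIO type):

```go
package main

import (
    "encoding/json"
    "fmt"
)

// ObjectLocation mirrors the "tags.objectLocation" payload in the audit-log
// excerpt above; the struct name is illustrative, not MinIO's own type.
type ObjectLocation struct {
    Name   string   `json:"name"`
    PoolID int      `json:"poolId"`
    SetID  int      `json:"setId"`
    Drives []string `json:"drives"`
}

func main() {
    raw := []byte(`{"name":"hosts","poolId":1,"setId":1,"drives":["/mnt/data1","/mnt/data2"]}`)

    var loc ObjectLocation
    if err := json.Unmarshal(raw, &loc); err != nil {
        panic(err)
    }
    // Prints: object "hosts" lives in pool 1, set 1, on 2 drives
    fmt.Printf("object %q lives in pool %d, set %d, on %d drives\n",
        loc.Name, loc.PoolID, loc.SetID, len(loc.Drives))
}
```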
cmd/erasure-server-pool.go
```go
Maintenance:   opts.Maintenance,
SetID:         setIdx,
PoolID:        poolIdx,
Healthy:       erasureSetUpCount[poolIdx][setIdx].online >= poolWriteQuorums[poolIdx],
HealthyRead:   erasureSetUpCount[poolIdx][setIdx].online >= poolReadQuorums[poolIdx],
HealthyDrives: erasureSetUpCount[poolIdx][setIdx].online,
HealingDrives: erasureSetUpCount[poolIdx][setIdx].healing,
ReadQuorum:    poolReadQuorums[poolIdx],
```
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 89.1K bytes - Viewed (0)
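The excerpt derives a set's health from drive counts: writable when online drives meet the pool's write quorum, readable at the lower read quorum. A self-contained sketch of that predicate, with hypothetical `setState`/`setHealth` types standing in for MinIO's internals:

```go
package main

import "fmt"

// setState stands in for the per-set drive counters (erasureSetUpCount) in
// the excerpt; the type and field names are illustrative.
type setState struct {
    online  int
    healing int
}

// setHealth mirrors the fields populated in the excerpt above.
type setHealth struct {
    PoolID, SetID                int
    Healthy, HealthyRead         bool
    HealthyDrives, HealingDrives int
}

// health applies the same predicates as the excerpt: writable when online
// drives reach the write quorum, readable at the (lower) read quorum.
func health(poolIdx, setIdx int, s setState, writeQuorum, readQuorum int) setHealth {
    return setHealth{
        PoolID:        poolIdx,
        SetID:         setIdx,
        Healthy:       s.online >= writeQuorum,
        HealthyRead:   s.online >= readQuorum,
        HealthyDrives: s.online,
        HealingDrives: s.healing,
    }
}

func main() {
    h := health(0, 1, setState{online: 9, healing: 1}, 10, 8)
    fmt.Printf("pool %d set %d: write-healthy=%v read-healthy=%v\n",
        h.PoolID, h.SetID, h.Healthy, h.HealthyRead)
    // pool 0 set 1: write-healthy=false read-healthy=true
}
```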
cmd/erasure-sets.go
```go
    }
}

type auditObjectOp struct {
    Name string `json:"name"`
    Pool int    `json:"poolId"`
    Set  int    `json:"setId"`
}

func (op auditObjectOp) String() string {
    // Flatten the auditObjectOp
    return fmt.Sprintf("name=%s,pool=%d,set=%d", op.Name, op.Pool, op.Set)
}

// Add erasure set information to the current context
```
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 37K bytes - Viewed (1)
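The struct and its `String()` method appear in full above, so a short usage sketch (the `main` scaffolding is mine) shows the flattened form that ends up in audit log lines:

```go
package main

import "fmt"

type auditObjectOp struct {
    Name string `json:"name"`
    Pool int    `json:"poolId"`
    Set  int    `json:"setId"`
}

func (op auditObjectOp) String() string {
    // Flatten the auditObjectOp
    return fmt.Sprintf("name=%s,pool=%d,set=%d", op.Name, op.Pool, op.Set)
}

func main() {
    op := auditObjectOp{Name: "hosts", Pool: 1, Set: 1}
    fmt.Println(op) // name=hosts,pool=1,set=1
}
```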
cmd/admin-handlers-pools.go
```go
    return
}
vars := mux.Vars(r)
v := vars["pool"]
byID := vars["by-id"] == "true"
pools := strings.Split(v, ",")
poolIndices := make([]int, 0, len(pools))
for _, pool := range pools {
    var idx int
    if byID {
        var err error
        idx, err = strconv.Atoi(pool)
        if err != nil {
            // We didn't find any matching pools, invalid input
```
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Thu Sep 04 20:47:24 UTC 2025 - 11K bytes - Viewed (0)
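The handler takes a comma-separated `pool` value and, when `by-id=true`, treats each entry as a numeric pool index rather than a pool's command line. A hedged sketch of just that parsing step, lifted out of the HTTP handler (the `parsePoolIndices` name and signature are mine):

```go
package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parsePoolIndices interprets a comma-separated pool spec. With byID set,
// every entry must be a numeric pool index; otherwise entries are matched
// against the pools' command lines. Illustrative helper, not MinIO's.
func parsePoolIndices(v string, byID bool, cmdLines []string) ([]int, error) {
    pools := strings.Split(v, ",")
    poolIndices := make([]int, 0, len(pools))
    for _, pool := range pools {
        var idx int
        if byID {
            var err error
            idx, err = strconv.Atoi(pool)
            if err != nil {
                // We didn't find any matching pools, invalid input
                return nil, fmt.Errorf("invalid pool id %q: %w", pool, err)
            }
        } else {
            idx = -1
            for i, c := range cmdLines {
                if c == pool {
                    idx = i
                    break
                }
            }
            if idx < 0 {
                return nil, fmt.Errorf("no pool matches %q", pool)
            }
        }
        poolIndices = append(poolIndices, idx)
    }
    return poolIndices, nil
}

func main() {
    idx, err := parsePoolIndices("0,2", true, nil)
    fmt.Println(idx, err) // [0 2] <nil>
}
```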
cmd/erasure-server-pool-rebalance.go
```go
    z.rebalMeta.PoolStats[poolIdx].Info.Status = status
    z.rebalMeta.PoolStats[poolIdx].Info.EndTime = now
    z.rebalMu.Unlock()
case <-timer.C:
    notify = false
    traceMsg = fmt.Sprintf("saved at %s", time.Now())
}
stopFn := globalRebalanceMetrics.log(rebalanceMetricSaveMetadata, poolIdx, traceMsg)
err := z.saveRebalanceStats(GlobalContext, poolIdx, rebalSaveStats)
stopFn(0, err)
```
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Thu Sep 04 20:47:24 UTC 2025 - 28.9K bytes - Viewed (0)
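The excerpt sits in a select loop that persists rebalance stats either when an event arrives or when a timer fires. A rough sketch of that save-on-event-or-timer pattern, with a generic `save` callback in place of `saveRebalanceStats` (names and signature are mine):

```go
package main

import (
    "fmt"
    "time"
)

// periodicSave mimics the pattern in the excerpt: persist state whenever a
// completion event arrives or a timer fires, whichever comes first. The
// save callback and done channel are stand-ins, not MinIO APIs.
func periodicSave(done <-chan struct{}, interval time.Duration, save func(reason string) error) {
    timer := time.NewTimer(interval)
    defer timer.Stop()
    for {
        var reason string
        select {
        case <-done:
            // Final save when the work completes, then exit.
            if err := save("completed"); err != nil {
                fmt.Println("save failed:", err)
            }
            return
        case <-timer.C:
            reason = fmt.Sprintf("saved at %s", time.Now())
        }
        if err := save(reason); err != nil {
            fmt.Println("save failed:", err)
        }
        timer.Reset(interval)
    }
}

func main() {
    done := make(chan struct{})
    go func() { time.Sleep(120 * time.Millisecond); close(done) }()
    periodicSave(done, 50*time.Millisecond, func(reason string) error {
        fmt.Println("saving rebalance stats:", reason)
        return nil
    })
}
```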
cmd/erasure-server-pool-decom.go
```go
        p.Pools[idx].LastUpdate = UTCNow()
        p.Pools[idx].Decommission.Complete = true
        p.Pools[idx].Decommission.Failed = false
        p.Pools[idx].Decommission.Canceled = false
        return true
    }
    return false
}

func (p *poolMeta) DecommissionFailed(idx int) bool {
    if p.Pools[idx].Decommission != nil && !p.Pools[idx].Decommission.Failed {
        p.Pools[idx].LastUpdate = UTCNow()
        p.Pools[idx].Decommission.StartTime = time.Time{}
```
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 42.1K bytes - Viewed (1)
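The excerpt records a terminal decommission state: `Complete` is set while `Failed` and `Canceled` are cleared, so only one outcome is recorded per pool. A simplified stand-in (the types and the `markComplete` method name are mine, not MinIO's):

```go
package main

import (
    "fmt"
    "time"
)

// decommissionInfo and poolStatus are simplified stand-ins for MinIO's
// PoolDecommissionInfo / PoolStatus; only the flags touched in the excerpt
// are modeled.
type decommissionInfo struct {
    StartTime time.Time
    Complete  bool
    Failed    bool
    Canceled  bool
}

type poolStatus struct {
    LastUpdate   time.Time
    Decommission *decommissionInfo
}

type poolMeta struct {
    Pools []poolStatus
}

// markComplete records a terminal "complete" state for pool idx, clearing
// the failed/canceled flags as the excerpt does; the method name is mine.
func (p *poolMeta) markComplete(idx int) bool {
    d := p.Pools[idx].Decommission
    if d != nil && !d.Complete {
        p.Pools[idx].LastUpdate = time.Now().UTC()
        d.Complete = true
        d.Failed = false
        d.Canceled = false
        return true
    }
    return false
}

func main() {
    m := poolMeta{Pools: []poolStatus{{Decommission: &decommissionInfo{}}}}
    fmt.Println(m.markComplete(0)) // true
    fmt.Println(m.markComplete(0)) // false: already marked complete
}
```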
cmd/erasure-server-pool-decom_test.go
```go
for i, pool := range nmeta2.Pools {
    if i == 0 {
        nmeta2.Pools[i] = PoolStatus{
            CmdLine:    pool.CmdLine,
            ID:         i,
            LastUpdate: UTCNow(),
            Decommission: &PoolDecommissionInfo{
                Complete: false,
            },
        }
    }
}

testCases := []struct {
    meta           poolMeta
    pools          []*erasureSets
    expectedUpdate bool
    expectedErr    bool
```
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 4.8K bytes - Viewed (0)
cmd/metrics-v2.go
```go
    Description: getClusterHealthStatusMD(),
    Value:       float64(health),
})
for _, h := range result.ESHealth {
    labels := map[string]string{
        "pool": strconv.Itoa(h.PoolID),
        "set":  strconv.Itoa(h.SetID),
    }
    metrics = append(metrics, MetricV2{
        Description:    getClusterErasureSetReadQuorumMD(),
        VariableLabels: labels,
```
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 133.4K bytes - Viewed (0)
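Each erasure-set metric is labeled with its pool and set indices rendered as strings. A minimal sketch of that labeling, with stand-in `esHealth`/`metric` types and an illustrative metric name (MinIO's exported names may differ):

```go
package main

import (
    "fmt"
    "strconv"
)

// esHealth and metric are minimal stand-ins for MinIO's ESHealth / MetricV2;
// the metric name below is illustrative, not the exported one.
type esHealth struct {
    PoolID     int
    SetID      int
    ReadQuorum int
}

type metric struct {
    Name   string
    Labels map[string]string
    Value  float64
}

// erasureSetMetrics labels each per-set metric with its pool and set index,
// mirroring the labeling in the excerpt above.
func erasureSetMetrics(health []esHealth) []metric {
    var metrics []metric
    for _, h := range health {
        labels := map[string]string{
            "pool": strconv.Itoa(h.PoolID),
            "set":  strconv.Itoa(h.SetID),
        }
        metrics = append(metrics, metric{
            Name:   "cluster_erasure_set_read_quorum",
            Labels: labels,
            Value:  float64(h.ReadQuorum),
        })
    }
    return metrics
}

func main() {
    for _, m := range erasureSetMetrics([]esHealth{{PoolID: 0, SetID: 1, ReadQuorum: 8}}) {
        fmt.Println(m.Name, m.Labels, m.Value)
    }
}
```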
cmd/peer-s3-client.go
```go
        return err
    }, idx)
}
errs := g.Wait()
var poolErrs []error
for poolIdx := 0; poolIdx < sys.poolsCount; poolIdx++ {
    perPoolErrs := make([]error, 0, len(sys.peerClients))
    for i, client := range sys.peerClients {
        if slices.Contains(client.GetPools(), poolIdx) {
            perPoolErrs = append(perPoolErrs, errs[i])
        }
    }
    quorum := len(perPoolErrs) / 2
```
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 15.6K bytes - Viewed (0)
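The excerpt collects each peer's error under every pool that peer belongs to and derives a simple majority quorum, `len(perPoolErrs) / 2`. A hedged sketch of reducing such a slice of errors against that quorum (the `reduceQuorumErrs` helper below is my simplification, not MinIO's own reducer):

```go
package main

import (
    "errors"
    "fmt"
)

// reduceQuorumErrs is a simplified stand-in for MinIO's quorum reduction
// helpers: nil wins if at least `quorum` of the collected results succeeded,
// otherwise the most frequent error is returned.
func reduceQuorumErrs(errs []error, quorum int) error {
    counts := map[string]int{}
    byMsg := map[string]error{}
    nilCount := 0
    for _, err := range errs {
        if err == nil {
            nilCount++
            continue
        }
        counts[err.Error()]++
        byMsg[err.Error()] = err
    }
    if nilCount >= quorum {
        return nil
    }
    var worst error
    most := 0
    for msg, n := range counts {
        if n > most {
            most, worst = n, byMsg[msg]
        }
    }
    return worst
}

func main() {
    errDrive := errors.New("drive offline")
    perPoolErrs := []error{nil, nil, errDrive, nil}
    quorum := len(perPoolErrs) / 2 // simple majority, as in the excerpt
    fmt.Println(reduceQuorumErrs(perPoolErrs, quorum)) // <nil>
}
```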
cmd/rebalance-admin.go
```go
}
if !ps.Participating {
    continue
}
// for participating pools, total bytes to be rebalanced by this pool is given by,
// pf_c = (f_i + x)/c_i,
// pf_c - percentage free space across pools, f_i - ith pool's free space, c_i - ith pool's capacity
// i.e. x = c_i*pf_c - f_i
totalBytesToRebal := float64(ps.InitCapacity)*meta.PercentFreeGoal - float64(ps.InitFreeSpace)
elapsed := time.Since(ps.Info.StartTime)
```
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 3.8K bytes - Viewed (0)
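The comment derives how much data a participating pool must shed: to reach the cluster-wide free-space ratio pf_c, pool i moves x = c_i*pf_c - f_i bytes. A small worked example with assumed figures (a 100 TiB pool with 20 TiB free and a 35% free-space goal; the numbers are illustrative only):

```go
package main

import "fmt"

func main() {
    // Assumed figures: a 100 TiB pool (c_i) with 20 TiB free (f_i),
    // while the cluster-wide free-space goal (pf_c) is 35%.
    const TiB = 1 << 40
    var (
        initCapacity    = float64(100 * TiB) // c_i
        initFreeSpace   = float64(20 * TiB)  // f_i
        percentFreeGoal = 0.35               // pf_c
    )

    // x = c_i*pf_c - f_i, as in the comment above.
    totalBytesToRebal := initCapacity*percentFreeGoal - initFreeSpace
    fmt.Printf("bytes to rebalance off this pool: %.0f TiB\n", totalBytesToRebal/TiB)
    // Output: bytes to rebalance off this pool: 15 TiB
}
```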