Results 1 - 10 of 163 for poolId (0.07 sec)
cmd/metrics-v3-cluster-erasure-set.go
erasureSetWriteQuorumMD = NewGaugeMD(erasureSetWriteQuorum,
	"Write quorum for the erasure set in a pool", poolIDL, setIDL)
erasureSetOnlineDrivesCountMD = NewGaugeMD(erasureSetOnlineDrivesCount,
	"Count of online drives in the erasure set in a pool", poolIDL, setIDL)
erasureSetHealingDrivesCountMD = NewGaugeMD(erasureSetHealingDrivesCount,
	"Count of healing drives in the erasure set in a pool", poolIDL, setIDL)
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Tue May 14 07:25:56 UTC 2024 - 4.4K bytes - Viewed (0)
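These descriptors follow the usual labeled-gauge pattern: declare the metric once with its label names (here pool ID and set ID) and set values per series. As a rough sketch of the same idea using the prometheus/client_golang library rather than MinIO's internal NewGaugeMD helper (metric and label names below are illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Hypothetical gauge mirroring "write quorum per erasure set",
	// labeled by pool and set IDs like poolIDL/setIDL above.
	writeQuorum := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "erasure_set_write_quorum",
		Help: "Write quorum for the erasure set in a pool",
	}, []string{"pool_id", "set_id"})

	reg := prometheus.NewRegistry()
	reg.MustRegister(writeQuorum)

	// Record a sample value for pool 0, set 1.
	writeQuorum.WithLabelValues("0", "1").Set(3)

	mfs, _ := reg.Gather()
	for _, mf := range mfs {
		fmt.Println(mf.GetName(), "-", len(mf.GetMetric()), "series")
	}
}
```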
docs/logging/README.md
"X-Amz-Request-Id": "17CDC1F4D7E69123", "X-Content-Type-Options": "nosniff", "X-Xss-Protection": "1; mode=block" }, "tags": { "objectLocation": { "name": "hosts", "poolId": 1, "setId": 1, "drives": [ "/mnt/data1", "/mnt/data2", "/mnt/data3", "/mnt/data4" ] } }, "accessKey": "minioadmin" } ```
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Tue Aug 12 18:20:36 UTC 2025 - 10.5K bytes - Viewed (0) -
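The `tags.objectLocation` block is where poolId/setId show up in these audit entries. A small decoding sketch, assuming only the fields visible in this excerpt, using encoding/json:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// objectLocation mirrors only the fields shown in the audit excerpt above.
type objectLocation struct {
	Name   string   `json:"name"`
	PoolID int      `json:"poolId"`
	SetID  int      `json:"setId"`
	Drives []string `json:"drives"`
}

type auditTags struct {
	ObjectLocation objectLocation `json:"objectLocation"`
}

func main() {
	raw := `{"tags":{"objectLocation":{"name":"hosts","poolId":1,"setId":1,
		"drives":["/mnt/data1","/mnt/data2","/mnt/data3","/mnt/data4"]}}}`

	var entry struct {
		Tags auditTags `json:"tags"`
	}
	if err := json.Unmarshal([]byte(raw), &entry); err != nil {
		log.Fatal(err)
	}
	loc := entry.Tags.ObjectLocation
	fmt.Printf("object %q lives in pool %d, set %d (%d drives)\n",
		loc.Name, loc.PoolID, loc.SetID, len(loc.Drives))
}
```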
cmd/erasure-server-pool.go
Maintenance:   opts.Maintenance,
SetID:         setIdx,
PoolID:        poolIdx,
Healthy:       erasureSetUpCount[poolIdx][setIdx].online >= poolWriteQuorums[poolIdx],
HealthyRead:   erasureSetUpCount[poolIdx][setIdx].online >= poolReadQuorums[poolIdx],
HealthyDrives: erasureSetUpCount[poolIdx][setIdx].online,
HealingDrives: erasureSetUpCount[poolIdx][setIdx].healing,
ReadQuorum:    poolReadQuorums[poolIdx],
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 89.1K bytes - Viewed (0)
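The health fields here reduce to quorum comparisons: a set is write-healthy when its online drive count meets the pool's write quorum, and read-healthy when it meets the read quorum. A simplified sketch of that rule (the type and field names below are illustrative stand-ins, not the actual server pool types):

```go
package main

import "fmt"

// setHealth is a simplified stand-in for the per-set health result built above.
type setHealth struct {
	PoolID, SetID int
	OnlineDrives  int
	HealingDrives int
	ReadQuorum    int
	WriteQuorum   int
}

// Healthy reports write health: enough online drives to satisfy write quorum.
func (s setHealth) Healthy() bool { return s.OnlineDrives >= s.WriteQuorum }

// HealthyRead reports read health: enough online drives to satisfy read quorum.
func (s setHealth) HealthyRead() bool { return s.OnlineDrives >= s.ReadQuorum }

func main() {
	s := setHealth{PoolID: 0, SetID: 2, OnlineDrives: 7, HealingDrives: 1, ReadQuorum: 6, WriteQuorum: 7}
	fmt.Printf("pool %d set %d: healthy=%t healthyRead=%t\n",
		s.PoolID, s.SetID, s.Healthy(), s.HealthyRead())
}
```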
cmd/erasure-sets.go
	}
}

type auditObjectOp struct {
	Name string `json:"name"`
	Pool int    `json:"poolId"`
	Set  int    `json:"setId"`
}

func (op auditObjectOp) String() string {
	// Flatten the auditObjectOp
	return fmt.Sprintf("name=%s,pool=%d,set=%d", op.Name, op.Pool, op.Set)
}

// Add erasure set information to the current context
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 37K bytes - Viewed (1)
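Since the struct and its String method appear in full above, a tiny standalone usage example is easy to reconstruct; it simply formats the pool/set location the way it would appear in an audit record:

```go
package main

import "fmt"

// Copied from the excerpt above: pool/set location attached to audit records.
type auditObjectOp struct {
	Name string `json:"name"`
	Pool int    `json:"poolId"`
	Set  int    `json:"setId"`
}

// String flattens the auditObjectOp into a single audit-friendly token.
func (op auditObjectOp) String() string {
	return fmt.Sprintf("name=%s,pool=%d,set=%d", op.Name, op.Pool, op.Set)
}

func main() {
	op := auditObjectOp{Name: "hosts", Pool: 1, Set: 1}
	fmt.Println(op) // name=hosts,pool=1,set=1
}
```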
cmd/admin-handlers-pools.go
	return
}
vars := mux.Vars(r)
v := vars["pool"]
byID := vars["by-id"] == "true"
pools := strings.Split(v, ",")
poolIndices := make([]int, 0, len(pools))
for _, pool := range pools {
	var idx int
	if byID {
		var err error
		idx, err = strconv.Atoi(pool)
		if err != nil {
			// We didn't find any matching pools, invalid input
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Thu Sep 04 20:47:24 UTC 2025 - 11K bytes - Viewed (0)
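The handler accepts either numeric pool indices (`by-id=true`) or pool endpoint strings, comma-separated. A minimal sketch of just that parsing step outside the HTTP handler; the lookup-by-endpoint branch is stubbed with a plain slice because it is not shown in the excerpt:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePoolArgs is a simplified stand-in for the handler logic above: v is the
// comma-separated "pool" variable and byID selects numeric indices. knownPools
// plays the role of the server's configured pool endpoint strings.
func parsePoolArgs(v string, byID bool, knownPools []string) ([]int, error) {
	pools := strings.Split(v, ",")
	poolIndices := make([]int, 0, len(pools))
	for _, pool := range pools {
		idx := -1
		if byID {
			n, err := strconv.Atoi(pool)
			if err != nil {
				// Invalid input: not a numeric pool ID.
				return nil, fmt.Errorf("invalid pool id %q: %w", pool, err)
			}
			idx = n
		} else {
			for i, kp := range knownPools {
				if kp == pool {
					idx = i
					break
				}
			}
		}
		if idx < 0 || idx >= len(knownPools) {
			return nil, fmt.Errorf("no matching pool for %q", pool)
		}
		poolIndices = append(poolIndices, idx)
	}
	return poolIndices, nil
}

func main() {
	idx, err := parsePoolArgs("0,1", true, []string{"poolA", "poolB"})
	fmt.Println(idx, err) // [0 1] <nil>
}
```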
docs/distributed/CONFIG.md
`v1` format

```yaml
pools: # Specify the nodes and drives with pools
  - - "https://server-example-pool1:9000/mnt/disk{1...4}/"
    - "https://server{1...2}-pool1:9000/mnt/disk{1...4}/"
    - "https://server3-pool1:9000/mnt/disk{1...4}/"
    - "https://server4-pool1:9000/mnt/disk{1...4}/"
```

to `v2` format

```yaml
pools:
  - args:
```
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Tue Jun 25 02:30:18 UTC 2024 - 4.2K bytes - Viewed (0)
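Both formats lean on the ellipsis notation (`{1...4}`) to describe drive and host ranges compactly. Purely as an illustration of what such a pattern denotes, and not MinIO's actual expansion code, here is a sketch that expands a single numeric range in an endpoint string:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// expandEllipsis expands one "{start...end}" numeric range in s,
// e.g. "/mnt/disk{1...4}/" -> /mnt/disk1/ ... /mnt/disk4/.
// A rough sketch only; the real parser handles multiple ranges, zero padding, etc.
func expandEllipsis(s string) []string {
	re := regexp.MustCompile(`\{(\d+)\.\.\.(\d+)\}`)
	m := re.FindStringSubmatchIndex(s)
	if m == nil {
		return []string{s}
	}
	start, _ := strconv.Atoi(s[m[2]:m[3]])
	end, _ := strconv.Atoi(s[m[4]:m[5]])
	var out []string
	for i := start; i <= end; i++ {
		out = append(out, s[:m[0]]+strconv.Itoa(i)+s[m[1]:])
	}
	return out
}

func main() {
	for _, d := range expandEllipsis("https://server4-pool1:9000/mnt/disk{1...4}/") {
		fmt.Println(d)
	}
}
```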
cmd/erasure-server-pool-rebalance.go
	z.rebalMeta.PoolStats[poolIdx].Info.Status = status
	z.rebalMeta.PoolStats[poolIdx].Info.EndTime = now
	z.rebalMu.Unlock()
case <-timer.C:
	notify = false
	traceMsg = fmt.Sprintf("saved at %s", time.Now())
}
stopFn := globalRebalanceMetrics.log(rebalanceMetricSaveMetadata, poolIdx, traceMsg)
err := z.saveRebalanceStats(GlobalContext, poolIdx, rebalSaveStats)
stopFn(0, err)
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Thu Sep 04 20:47:24 UTC 2025 - 28.9K bytes - Viewed (0)
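The excerpt is the tail of a save loop: rebalance stats are persisted either when the pool's status changes or when a timer fires. A stripped-down sketch of that select-on-update-or-timer pattern, with the metrics/tracing hooks and real stats types omitted:

```go
package main

import (
	"fmt"
	"time"
)

// saveLoop persists stats whenever an update arrives or the interval elapses,
// mirroring the shape of the excerpt above.
func saveLoop(updates <-chan string, interval time.Duration, save func(reason string) error, done <-chan struct{}) {
	timer := time.NewTimer(interval)
	defer timer.Stop()
	for {
		var reason string
		select {
		case <-done:
			return
		case status := <-updates:
			reason = "status changed to " + status
		case <-timer.C:
			reason = fmt.Sprintf("saved at %s", time.Now())
		}
		if err := save(reason); err != nil {
			fmt.Println("save failed:", err)
		}
		timer.Reset(interval)
	}
}

func main() {
	updates := make(chan string, 1)
	done := make(chan struct{})
	go saveLoop(updates, 50*time.Millisecond, func(reason string) error {
		fmt.Println("persisting rebalance stats:", reason)
		return nil
	}, done)
	updates <- "completed"
	time.Sleep(120 * time.Millisecond)
	close(done)
}
```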
cmd/erasure-server-pool-decom.go
		p.Pools[idx].LastUpdate = UTCNow()
		p.Pools[idx].Decommission.Complete = true
		p.Pools[idx].Decommission.Failed = false
		p.Pools[idx].Decommission.Canceled = false
		return true
	}
	return false
}

func (p *poolMeta) DecommissionFailed(idx int) bool {
	if p.Pools[idx].Decommission != nil && !p.Pools[idx].Decommission.Failed {
		p.Pools[idx].LastUpdate = UTCNow()
		p.Pools[idx].Decommission.StartTime = time.Time{}
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 42.1K bytes - Viewed (1)
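The decommission bookkeeping is a set of mutually exclusive flags (Complete/Failed/Canceled) plus a LastUpdate timestamp, guarded by a nil check on the per-pool Decommission record. A reduced sketch of the completion transition; the surrounding types are simplified, and the method name and its `!Complete` guard are assumptions made by analogy with the DecommissionFailed method shown above:

```go
package main

import (
	"fmt"
	"time"
)

// decomState is a cut-down stand-in for the per-pool decommission record.
type decomState struct {
	StartTime time.Time
	Complete  bool
	Failed    bool
	Canceled  bool
}

type pool struct {
	LastUpdate   time.Time
	Decommission *decomState
}

type poolMeta struct{ Pools []pool }

// DecommissionComplete marks pool idx as successfully decommissioned,
// clearing the failure/cancel flags, as in the excerpt above.
func (p *poolMeta) DecommissionComplete(idx int) bool {
	if p.Pools[idx].Decommission != nil && !p.Pools[idx].Decommission.Complete {
		p.Pools[idx].LastUpdate = time.Now().UTC()
		p.Pools[idx].Decommission.Complete = true
		p.Pools[idx].Decommission.Failed = false
		p.Pools[idx].Decommission.Canceled = false
		return true
	}
	return false
}

func main() {
	p := poolMeta{Pools: []pool{{Decommission: &decomState{StartTime: time.Now()}}}}
	fmt.Println(p.DecommissionComplete(0)) // true
	fmt.Println(p.DecommissionComplete(0)) // false: already complete
}
```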
okhttp/src/commonJvmAndroid/kotlin/okhttp3/internal/connection/RealRoutePlanner.kt
    return planConnectToRoute(newRouteSelection.next(), newRouteSelection.routes)
  }

  /**
   * Returns a plan to reuse a pooled connection, or null if the pool doesn't have a connection for
   * this address.
   *
   * If [planToReplace] is non-null, this will swap it for a pooled connection if that pooled
   * connection uses HTTP/2. That results in fewer sockets overall and thus fewer TCP slow starts.
   */
Registered: Fri Sep 05 11:42:10 UTC 2025 - Last Modified: Wed May 28 23:28:25 UTC 2025 - 12K bytes - Viewed (0)
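The comment describes OkHttp's preference for reusing a pooled (ideally HTTP/2) connection instead of opening a new socket. The same idea expressed in Go, purely as an analogy and not OkHttp code, is sharing one http.Client so its Transport can pool and reuse connections across requests:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	// One shared client: its Transport keeps idle connections in a pool and
	// reuses them (multiplexing over HTTP/2 when available), which is the
	// "fewer sockets, fewer TCP slow starts" effect the OkHttp comment describes.
	client := &http.Client{
		Transport: &http.Transport{
			MaxIdleConns:        100,
			MaxIdleConnsPerHost: 10,
			IdleConnTimeout:     90 * time.Second,
			ForceAttemptHTTP2:   true,
		},
	}

	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/")
		if err != nil {
			fmt.Println("request failed:", err)
			return
		}
		io.Copy(io.Discard, resp.Body) // drain so the connection can be reused
		resp.Body.Close()
		fmt.Println("request", i, "status:", resp.Status, "proto:", resp.Proto)
	}
}
```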
cmd/testdata/config/invalid.yaml
console-address: ':9001'
certs-dir: '/home/user/.minio/certs/'
pools: # Specify the nodes and drives with pools
  - - 'https://server-example-pool1:9000/mnt/disk{1...4}/'
    - 'https://server1-pool1:9000/mnt/disk{1...4}/'
    - 'https://server3-pool1:9000/mnt/disk{1...4}/'
    - 'https://server4-pool1:9000/mnt/disk{1...4}/'
  - - 'https://server-example-pool2:9000/mnt/disk{1...4}/'
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Thu Dec 07 09:33:56 UTC 2023 - 866 bytes - Viewed (0)
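Test fixtures like this exercise the config loader's error paths. A hypothetical validation sketch, assuming only the keys visible in the snippet and using gopkg.in/yaml.v3 rather than MinIO's actual loader:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// serverConfig captures only the keys visible in the snippet above.
type serverConfig struct {
	ConsoleAddress string     `yaml:"console-address"`
	CertsDir       string     `yaml:"certs-dir"`
	Pools          [][]string `yaml:"pools"`
}

func validate(data []byte) error {
	var cfg serverConfig
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		return fmt.Errorf("parse: %w", err)
	}
	if len(cfg.Pools) == 0 {
		return fmt.Errorf("config must declare at least one pool")
	}
	for i, pool := range cfg.Pools {
		if len(pool) == 0 {
			return fmt.Errorf("pool %d has no endpoints", i)
		}
	}
	return nil
}

func main() {
	doc := []byte(`
console-address: ':9001'
certs-dir: '/home/user/.minio/certs/'
pools:
  - - 'https://server-example-pool1:9000/mnt/disk{1...4}/'
`)
	if err := validate(doc); err != nil {
		fmt.Println("invalid config:", err)
	} else {
		fmt.Println("config parsed and passed the sketch's checks")
	}
}
```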