Results 1 - 10 of 114 for poolId (0.04 sec)
- docs/logging/README.md

```json
    "X-Amz-Request-Id": "17CDC1F4D7E69123",
    "X-Content-Type-Options": "nosniff",
    "X-Xss-Protection": "1; mode=block"
  },
  "tags": {
    "objectLocation": {
      "name": "hosts",
      "poolId": 1,
      "setId": 1,
      "drives": [
        "/mnt/data1",
        "/mnt/data2",
        "/mnt/data3",
        "/mnt/data4"
      ]
    }
  },
  "accessKey": "minioadmin"
}
```
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Thu May 09 17:15:03 UTC 2024 - 10.4K bytes - Viewed (0)
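The `objectLocation` tag in this audit entry records which server pool, erasure set, and drives hold the object. As a quick illustration, here is a minimal Go sketch that unmarshals just that tag; the struct names are hypothetical and simply mirror the JSON keys shown above, not an official MinIO client type.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical types mirroring the JSON keys in the excerpt above.
type objectLocation struct {
	Name   string   `json:"name"`
	PoolID int      `json:"poolId"`
	SetID  int      `json:"setId"`
	Drives []string `json:"drives"`
}

type auditTags struct {
	ObjectLocation objectLocation `json:"objectLocation"`
}

func main() {
	raw := []byte(`{"objectLocation":{"name":"hosts","poolId":1,"setId":1,"drives":["/mnt/data1","/mnt/data2"]}}`)
	var tags auditTags
	if err := json.Unmarshal(raw, &tags); err != nil {
		panic(err)
	}
	fmt.Println(tags.ObjectLocation.PoolID, tags.ObjectLocation.SetID) // 1 1
}
```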
- cmd/erasure-sets.go

```go
	}
}

type auditObjectOp struct {
	Name string `json:"name"`
	Pool int    `json:"poolId"`
	Set  int    `json:"setId"`
}

func (op auditObjectOp) String() string {
	// Flatten the auditObjectOp
	return fmt.Sprintf("name=%s,pool=%d,set=%d", op.Name, op.Pool, op.Set)
}

// Add erasure set information to the current context
```
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Sep 27 10:41:37 UTC 2024 - 37K bytes - Viewed (1)
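The JSON tags on `auditObjectOp` (`name`, `poolId`, `setId`) match the `objectLocation` keys in the logging example above, and `String()` flattens the value into a single `name=...,pool=...,set=...` token. A minimal standalone sketch of that flattening, with the type copied from the excerpt so it runs outside the MinIO tree:

```go
package main

import "fmt"

// auditObjectOp is copied verbatim from the excerpt above so the example runs
// on its own.
type auditObjectOp struct {
	Name string `json:"name"`
	Pool int    `json:"poolId"`
	Set  int    `json:"setId"`
}

func (op auditObjectOp) String() string {
	// Flatten the auditObjectOp
	return fmt.Sprintf("name=%s,pool=%d,set=%d", op.Name, op.Pool, op.Set)
}

func main() {
	op := auditObjectOp{Name: "hosts", Pool: 1, Set: 1}
	fmt.Println(op) // name=hosts,pool=1,set=1
}
```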
- okhttp/src/main/kotlin/okhttp3/internal/connection/RealRoutePlanner.kt

```kotlin
    return planConnectToRoute(newRouteSelection.next(), newRouteSelection.routes)
  }

  /**
   * Returns a plan to reuse a pooled connection, or null if the pool doesn't have a connection for
   * this address.
   *
   * If [planToReplace] is non-null, this will swap it for a pooled connection if that pooled
   * connection uses HTTP/2. That results in fewer sockets overall and thus fewer TCP slow starts.
   */
```
Registered: Fri Nov 01 11:42:11 UTC 2024 - Last Modified: Sat Apr 20 17:03:43 UTC 2024 - 12K bytes - Viewed (0)
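The comment above describes a policy rather than anything pool-ID specific: reuse a pooled connection for the target address, and only displace an already-planned connection when the pooled one is multiplexed over HTTP/2, since multiplexing means fewer sockets and fewer TCP slow starts. A rough sketch of that policy, written in Go to match the rest of this page and with invented type names (this is not OkHttp's API):

```go
package connpool

// pooledConn and connPool are invented names for this sketch, not OkHttp types.
type pooledConn struct {
	addr  string
	http2 bool
}

type connPool struct {
	conns []pooledConn
}

// planReusePooledConnection returns a pooled connection for addr, or nil if
// none exists. When planToReplace is non-nil, the existing plan is only
// displaced by a pooled connection that speaks HTTP/2.
func (p *connPool) planReusePooledConnection(addr string, planToReplace *pooledConn) *pooledConn {
	for i := range p.conns {
		c := &p.conns[i]
		if c.addr != addr {
			continue
		}
		if planToReplace != nil && !c.http2 {
			// Keep the existing plan: a non-multiplexed pooled connection is
			// not worth the swap.
			continue
		}
		return c
	}
	return nil
}
```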
- cmd/erasure-server-pool-rebalance.go

```go
			z.rebalMu.Lock()
			z.rebalMeta.PoolStats[poolIdx].Info.Status = status
			z.rebalMeta.PoolStats[poolIdx].Info.EndTime = now
			z.rebalMu.Unlock()
		case <-timer.C:
			traceMsg = fmt.Sprintf("saved at %s", time.Now())
		}

		stopFn := globalRebalanceMetrics.log(rebalanceMetricSaveMetadata, poolIdx, traceMsg)
		err := z.saveRebalanceStats(GlobalContext, poolIdx, rebalSaveStats)
		stopFn(0, err)
```
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Sep 06 13:20:19 UTC 2024 - 28.4K bytes - Viewed (0)
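Two things are interleaved in this excerpt: rebalance status updates made under `rebalMu`, and a timer-driven branch that periodically persists the stats via `saveRebalanceStats`. Below is a simplified, self-contained sketch of that periodic-save pattern; the names are illustrative, not the actual MinIO types.

```go
package main

import (
	"context"
	"log"
	"time"
)

// saveLoop persists stats each time the timer fires, until ctx is cancelled.
// The excerpt above also saves on status transitions; this sketch keeps only
// the timer-driven branch.
func saveLoop(ctx context.Context, interval time.Duration, save func() error) {
	timer := time.NewTimer(interval)
	defer timer.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-timer.C:
			if err := save(); err != nil {
				log.Printf("saving rebalance stats: %v", err)
			}
			timer.Reset(interval)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	saveLoop(ctx, time.Second, func() error {
		log.Printf("saved at %s", time.Now())
		return nil
	})
}
```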
- cmd/erasure-server-pool-decom.go

```go
		p.Pools[idx].LastUpdate = UTCNow()
		p.Pools[idx].Decommission.Complete = true
		p.Pools[idx].Decommission.Failed = false
		p.Pools[idx].Decommission.Canceled = false
		return true
	}
	return false
}

func (p *poolMeta) DecommissionFailed(idx int) bool {
	if p.Pools[idx].Decommission != nil && !p.Pools[idx].Decommission.Failed {
		p.Pools[idx].LastUpdate = UTCNow()
		p.Pools[idx].Decommission.StartTime = time.Time{}
```
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Sep 06 13:20:19 UTC 2024 - 42.2K bytes - Viewed (0)
- cmd/admin-handlers-pools.go

```go
		return
	}

	vars := mux.Vars(r)
	v := vars["pool"]
	byID := vars["by-id"] == "true"

	pools := strings.Split(v, ",")
	poolIndices := make([]int, 0, len(pools))

	for _, pool := range pools {
		var idx int
		if byID {
			var err error
			idx, err = strconv.Atoi(pool)
			if err != nil {
				// We didn't find any matching pools, invalid input
```
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Jun 28 00:22:30 UTC 2024 - 10.9K bytes - Viewed (0)
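This handler accepts the target pools as a comma-separated list, interpreted as numeric pool indices when `by-id=true` and as pool names otherwise. A standalone sketch of that parsing; the `poolNames` slice is a stand-in for the server's configured pools, not the actual MinIO lookup:

```go
package pools

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePoolIndices resolves a comma-separated "pool" value into pool indices,
// by numeric id when byID is true and by name otherwise.
func parsePoolIndices(v string, byID bool, poolNames []string) ([]int, error) {
	pools := strings.Split(v, ",")
	poolIndices := make([]int, 0, len(pools))
	for _, pool := range pools {
		idx := -1
		if byID {
			var err error
			idx, err = strconv.Atoi(pool)
			if err != nil {
				return nil, fmt.Errorf("invalid pool id %q: %w", pool, err)
			}
		} else {
			for i, name := range poolNames {
				if name == pool {
					idx = i
					break
				}
			}
		}
		if idx < 0 || idx >= len(poolNames) {
			return nil, fmt.Errorf("pool %q not found", pool)
		}
		poolIndices = append(poolIndices, idx)
	}
	return poolIndices, nil
}
```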
- cmd/erasure-server-pool-decom_gen.go

```go
			if err != nil {
				err = msgp.WrapError(err, "Pools")
				return
			}
			if cap(z.Pools) >= int(zb0002) {
				z.Pools = (z.Pools)[:zb0002]
			} else {
				z.Pools = make([]PoolStatus, zb0002)
			}
			for za0001 := range z.Pools {
				err = z.Pools[za0001].DecodeMsg(dc)
				if err != nil {
					err = msgp.WrapError(err, "Pools", za0001)
					return
				}
			}
		default:
```
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Mon Jul 04 21:02:54 UTC 2022 - 26.7K bytes - Viewed (0)
- cmd/metacache-server-pool.go

```go
		// Use ID as the object name...
		o.pool = z.getAvailablePoolIdx(ctx, minioMetaBucket, o.ID, 10<<20)
		if o.pool < 0 {
			// No space or similar, don't persist the listing.
			o.pool = 0
			o.Create = false
			o.ID = ""
			o.Transient = true
			return entries, errDiskFull
		}
		o.set = z.serverPools[o.pool].getHashedSetIndex(o.ID)
		saver := z.serverPools[o.pool].sets[o.set]
```
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Jul 12 16:23:16 UTC 2024 - 12.7K bytes - Viewed (0)
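Two decisions are visible here: pick a pool that still has room for the listing metadata (falling back to a transient, non-persisted listing on `errDiskFull`), then hash the listing ID onto one of that pool's erasure sets via `getHashedSetIndex`. The hash step can be illustrated generically; the sketch below uses CRC32 modulo the set count, which is only a stand-in for MinIO's actual distribution algorithm.

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// hashedSetIndex deterministically maps an ID onto one of setCount erasure
// sets. Illustration only; MinIO's actual distribution algorithm may differ.
func hashedSetIndex(id string, setCount int) int {
	return int(crc32.ChecksumIEEE([]byte(id)) % uint32(setCount))
}

func main() {
	fmt.Println(hashedSetIndex("2fc1e0d0-listing-id", 16))
}
```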
- cmd/peer-s3-client.go

```go
			return err
		}, idx)
	}

	errs := g.Wait()

	var poolErrs []error
	for poolIdx := 0; poolIdx < sys.poolsCount; poolIdx++ {
		perPoolErrs := make([]error, 0, len(sys.peerClients))
		for i, client := range sys.peerClients {
			if slices.Contains(client.GetPools(), poolIdx) {
				perPoolErrs = append(perPoolErrs, errs[i])
			}
		}
		quorum := len(perPoolErrs) / 2
```
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Tue Aug 13 22:26:05 UTC 2024 - 15.4K bytes - Viewed (0)
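Here the per-peer results are regrouped by pool: each pool considers only the errors returned by peers that actually serve it, and success is then judged against a quorum of half that pool's peers. A simplified sketch of that per-pool check; the actual error reduction in MinIO is more involved than this.

```go
package peers

// poolQuorumOK reports whether at least `quorum` of the peers serving a pool
// returned no error, mirroring quorum := len(perPoolErrs) / 2 in the excerpt.
func poolQuorumOK(perPoolErrs []error) bool {
	quorum := len(perPoolErrs) / 2
	ok := 0
	for _, err := range perPoolErrs {
		if err == nil {
			ok++
		}
	}
	return ok >= quorum
}
```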
- cmd/object-api-interface.go

```go
	GetDisks(poolIdx, setIdx int) ([]StorageAPI, error) // return the disks belonging to pool and set.
	SetDriveCounts() []int                              // list of erasure stripe size for each pool in order.

	// Healing operations.
	HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error)
```
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Thu Aug 22 21:57:20 UTC 2024 - 17.3K bytes - Viewed (0)
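`GetDisks` and `SetDriveCounts` are the two pool/set-addressed entry points in this interface. The sketch below shows how a caller might walk them; the small local interface repeats just the two signatures from the excerpt, `StorageAPI` is aliased to an empty interface as a placeholder for the MinIO storage type referenced there, and `setsPerPool` is an assumed input because the per-pool set count is not part of this excerpt.

```go
package layout

import "log"

// StorageAPI is a placeholder for the MinIO storage interface referenced in
// the excerpt; only its presence matters for this sketch.
type StorageAPI = interface{}

// disksAPI repeats just the two methods from the excerpt so the sketch is
// self-contained; it is not the full MinIO ObjectLayer interface.
type disksAPI interface {
	GetDisks(poolIdx, setIdx int) ([]StorageAPI, error)
	SetDriveCounts() []int
}

// logDriveLayout walks every pool and set, logging the erasure stripe size and
// the number of drives behind each set. setsPerPool is an assumed input.
func logDriveLayout(objAPI disksAPI, setsPerPool []int) {
	for poolIdx, stripeSize := range objAPI.SetDriveCounts() {
		log.Printf("pool %d: erasure stripe size %d", poolIdx, stripeSize)
		for setIdx := 0; setIdx < setsPerPool[poolIdx]; setIdx++ {
			disks, err := objAPI.GetDisks(poolIdx, setIdx)
			if err != nil {
				log.Printf("pool %d set %d: %v", poolIdx, setIdx, err)
				continue
			}
			log.Printf("pool %d set %d has %d drives", poolIdx, setIdx, len(disks))
		}
	}
}
```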