Results 21 - 30 of 701 for pools (0.08 sec)
cmd/globals.go
// diskFillFraction is the fraction of a disk we allow to be filled.
diskFillFraction = 0.99

// diskReserveFraction is the fraction of a disk where we will fill other server pools first.
// If all pools reach this, we will use all pools with regular placement.
diskReserveFraction = 0.15

// diskAssumeUnknownSize is the size to assume when an unknown size upload is requested.
diskAssumeUnknownSize = 1 << 30
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Tue Sep 03 18:23:41 UTC 2024 - 16.2K bytes - Viewed (1)
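These constants steer object placement across server pools: a pool whose fullest disk has eaten into the reserve fraction is deprioritized until every pool is in the same state, at which point regular placement resumes. A minimal sketch of that decision under this reading, using hypothetical names (`poolUsage`, `pickPool`) that are not MinIO's actual placement code:

```go
package main

import "fmt"

const (
	diskFillFraction    = 0.99 // never fill a disk past this fraction
	diskReserveFraction = 0.15 // prefer other pools once free space drops below this
)

// poolUsage is a hypothetical summary of a pool's fullest disk.
type poolUsage struct {
	name string
	used float64 // fraction of the disk already filled, 0.0..1.0
}

// pickPool prefers pools that still have their reserve free; if every pool
// has crossed the reserve threshold, any pool that is not hard-full remains
// eligible, mirroring "If all pools reach this, we will use all pools with
// regular placement."
func pickPool(pools []poolUsage) (string, bool) {
	for _, p := range pools {
		if p.used < 1.0-diskReserveFraction {
			return p.name, true // pool still has its reserve free
		}
	}
	for _, p := range pools {
		if p.used < diskFillFraction {
			return p.name, true // all reserves exhausted: regular placement
		}
	}
	return "", false // every pool is effectively full
}

func main() {
	pools := []poolUsage{{"pool-0", 0.90}, {"pool-1", 0.70}}
	if name, ok := pickPool(pools); ok {
		fmt.Println("place object on", name) // pool-1: below the 0.85 reserve line
	}
}
```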
.github/workflows/mint.yml
    run: |
      ${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "compress-encrypt" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
  - name: multiple pools
    run: |
      ${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "pools" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
  - name: standalone erasure
    run: |
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Tue Jun 04 15:12:57 UTC 2024 - 2.9K bytes - Viewed (0)
cmd/erasure-server-pool.go
func (z *erasureServerPools) poolsWithObject(pools []PoolObjInfo, opts ObjectOptions) (errs []poolErrs) {
	for _, pool := range pools {
		if opts.SkipDecommissioned && z.IsSuspended(pool.Index) {
			continue
		}
		// Skip object if it's from pools participating in a rebalance operation.
		if opts.SkipRebalancing && z.IsPoolRebalancing(pool.Index) {
			continue
		}
		if isErrReadQuorum(pool.Err) || pool.Err == nil {
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Sun Sep 29 22:40:36 UTC 2024 - 89.8K bytes - Viewed (0)
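The truncated function above is a skip-and-collect pass: keep pools that definitely hold the object (`pool.Err == nil`) or whose lookup is inconclusive because read quorum was not met, while honoring the decommission and rebalance skip flags. A self-contained sketch of the same pattern with simplified stand-in types (the `cluster`, `options`, and error values below are illustrative, not MinIO's internals):

```go
package main

import (
	"errors"
	"fmt"
)

var errReadQuorum = errors.New("read quorum not met")

type poolObjInfo struct {
	Index int
	Err   error // result of looking the object up in this pool
}

type options struct {
	SkipDecommissioned bool
	SkipRebalancing    bool
}

type cluster struct {
	suspended   map[int]bool
	rebalancing map[int]bool
}

// poolsWithObject keeps only pools that either definitely have the object
// or whose answer is unreliable (read quorum error), skipping pools that
// are decommissioning or rebalancing when asked to.
func (c *cluster) poolsWithObject(pools []poolObjInfo, opts options) []poolObjInfo {
	var out []poolObjInfo
	for _, pool := range pools {
		if opts.SkipDecommissioned && c.suspended[pool.Index] {
			continue
		}
		if opts.SkipRebalancing && c.rebalancing[pool.Index] {
			continue
		}
		if pool.Err == nil || errors.Is(pool.Err, errReadQuorum) {
			out = append(out, pool)
		}
	}
	return out
}

func main() {
	c := &cluster{suspended: map[int]bool{1: true}, rebalancing: map[int]bool{}}
	pools := []poolObjInfo{{Index: 0}, {Index: 1}, {Index: 2, Err: errReadQuorum}}
	// Pool 1 is suspended and dropped; pools 0 and 2 survive.
	fmt.Println(c.poolsWithObject(pools, options{SkipDecommissioned: true}))
}
```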
.github/workflows/run-mint.sh
docker volume prune -f || true
docker volume rm $(docker volume ls -q -f dangling=true) || true

# Stop two nodes, one of each pool, to check that all S3 calls work while quorum is still there
[ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio2
[ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio6

# Pause one node, to check that all S3 calls work while one node goes wrong
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Wed May 22 23:07:14 UTC 2024 - 1.9K bytes - Viewed (0)
cmd/server-main.go
if err != nil {
	return err
}
setDriveCount := uint64(v)

var pools []poolArgs
switch cv.Version {
case "v1":
	cfV1 := config.ServerConfigV1{}
	if err = yaml.Unmarshal(rd, &cfV1); err != nil {
		return err
	}
	pools = make([]poolArgs, 0, len(cfV1.Pools))
	for _, list := range cfV1.Pools {
		pools = append(pools, poolArgs{
			args:          list,
			setDriveCount: setDriveCount,
		})
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Tue Sep 24 21:50:11 UTC 2024 - 35.2K bytes - Viewed (2)
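The v1 branch unmarshals the YAML server config into per-pool argument lists. A hedged sketch of a struct that would accept the v1 layout shown in docs/distributed/distributed-from-config-file.sh further down (the field names are inferred from that YAML, not copied from MinIO's config.ServerConfigV1):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// serverConfigV1 mirrors the v1 YAML layout used in
// docs/distributed/distributed-from-config-file.sh; MinIO's actual
// config.ServerConfigV1 may carry a different field set.
type serverConfigV1 struct {
	Version        string     `yaml:"version"`
	Address        string     `yaml:"address"`
	ConsoleAddress string     `yaml:"console-address"`
	RootUser       string     `yaml:"rootUser"`
	RootPassword   string     `yaml:"rootPassword"`
	Pools          [][]string `yaml:"pools"` // each pool is a list of node/drive args
}

func main() {
	raw := []byte(`
version: v1
address: ':9001'
console-address: ':10001'
rootUser: 'minr0otUS2r'
rootPassword: 'pBU94AGAY85e'
pools:
  - - 'http://localhost:9001/mnt/disk{1...4}/'
  - - 'http://localhost:9002/mnt/disk{1...4}/'
`)
	var cfg serverConfigV1
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%d pool(s), first pool args: %v\n", len(cfg.Pools), cfg.Pools[0])
}
```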
cmd/endpoint.go
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Jun 21 22:22:24 UTC 2024 - 34.2K bytes - Viewed (0)
cmd/metrics-v3-cluster-erasure-set.go
erasureSetOnlineDrivesCountMD = NewGaugeMD(erasureSetOnlineDrivesCount,
	"Count of online drives in the erasure set in a pool", poolIDL, setIDL)
erasureSetHealingDrivesCountMD = NewGaugeMD(erasureSetHealingDrivesCount,
	"Count of healing drives in the erasure set in a pool", poolIDL, setIDL)
erasureSetHealthMD = NewGaugeMD(erasureSetHealth,
	"Health of the erasure set in a pool (1=healthy, 0=unhealthy)", poolIDL, setIDL)
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Tue May 14 07:25:56 UTC 2024 - 4.4K bytes - Viewed (0)
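Each descriptor surfaces as a gauge labeled by pool and set. A hedged sketch of an equivalent registration with the standard Prometheus Go client (the metric and label names below are assumptions modeled on the help strings above, not MinIO's exact exported names):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// One gauge per (pool, set) pair; 1 means the set is healthy.
	erasureSetHealth := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "minio_cluster_erasure_set_health", // assumed name
		Help: "Health of the erasure set in a pool (1=healthy, 0=unhealthy)",
	}, []string{"pool_id", "set_id"})

	reg := prometheus.NewRegistry()
	reg.MustRegister(erasureSetHealth)

	// Mark pool 0 / set 0 healthy.
	erasureSetHealth.WithLabelValues("0", "0").Set(1)
	fmt.Println("registered erasure-set health gauge")
}
```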
guava/src/com/google/common/util/concurrent/MoreExecutors.java
 * is complete. It does so by using daemon threads and adding a shutdown hook to wait for their
 * completion.
 *
 * <p>This is mainly for fixed thread pools. See {@link Executors#newFixedThreadPool(int)}.
 *
 * @param executor the executor to modify to make sure it exits when the application is finished
Registered: Fri Nov 01 12:43:10 UTC 2024 - Last Modified: Sat Oct 19 00:51:36 UTC 2024 - 44.1K bytes - Viewed (0)
docs/distributed/distributed-from-config-file.sh
consolePort="$((s3Port + 1000))"
cat <<EOF >/tmp/minio.configfile.$i
version: v1
address: ':${s3Port}'
console-address: ':${consolePort}'
rootUser: 'minr0otUS2r'
rootPassword: 'pBU94AGAY85e'
pools: # Specify the nodes and drives with pools
  - - 'http://localhost:9001/tmp/xl/node9001/mnt/disk{1...4}/'
    - 'http://localhost:9002/tmp/xl/node9002/mnt/disk{1,2,3,4}/'
  - - 'http://localhost:9003/tmp/xl/node9003/mnt/disk{1...4}/'
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Jun 28 09:06:49 UTC 2024 - 3.3K bytes - Viewed (0)
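The drive arguments use MinIO's ellipsis syntax: `disk{1...4}` and `disk{1,2,3,4}` both enumerate disk1 through disk4. A minimal stand-alone expander for the numeric-range form, for illustration only (MinIO uses its own ellipses parser, not this code):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var rangePattern = regexp.MustCompile(`\{(\d+)\.\.\.(\d+)\}`)

// expandEllipsis expands a single {start...end} range, e.g.
// "disk{1...4}" -> [disk1 disk2 disk3 disk4].
func expandEllipsis(arg string) []string {
	m := rangePattern.FindStringSubmatchIndex(arg)
	if m == nil {
		return []string{arg} // no range pattern present
	}
	start, _ := strconv.Atoi(arg[m[2]:m[3]])
	end, _ := strconv.Atoi(arg[m[4]:m[5]])
	var out []string
	for i := start; i <= end; i++ {
		// Splice the counter in place of the {start...end} token.
		out = append(out, arg[:m[0]]+strconv.Itoa(i)+arg[m[1]:])
	}
	return out
}

func main() {
	fmt.Println(expandEllipsis("http://localhost:9001/mnt/disk{1...4}/"))
}
```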
docs/distributed/README.md
> **NOTE:** **Each pool you add must have the same erasure coding parity configuration as the original pool, so the same data redundancy SLA is maintained.**
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Thu Jan 18 07:03:17 UTC 2024 - 8.8K bytes - Viewed (0)