Results 21 - 30 of 73 for poolEps (0.07 sec)
.github/workflows/mint.yml
          run: |
            ${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "compress-encrypt" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
        - name: multiple pools
          run: |
            ${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "pools" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
        - name: standalone erasure
          run: |

cmd/testdata/config/invalid-types.yaml
    version: v1
    address: ':9000'
    console-address: ':9001'
    certs-dir: '/home/user/.minio/certs/'
    pools: # Specify the nodes and drives with pools
      - - '/mnt/disk{1...4}/'
        - 'https://server1-pool1:9000/mnt/disk{1...4}/'
        - 'https://server3-pool1:9000/mnt/disk{1...4}/'
        - 'https://server4-pool1:9000/mnt/disk{1...4}/'
      - - 'https://server-example-pool2:9000/mnt/disk{1...4}/'

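The '/mnt/disk{1...4}/' arguments above use MinIO's ellipsis notation, which expands a numeric range into individual drive paths. A minimal sketch of the expansion semantics only (expandEllipsis is a hypothetical helper, not MinIO's actual parser, which also supports multiple ellipses per argument):

    package main

    import "fmt"

    // expandEllipsis expands a single "prefix{a...b}suffix" pattern into
    // concrete paths; a simplified sketch of the range semantics only.
    func expandEllipsis(prefix string, start, end int, suffix string) []string {
        out := make([]string, 0, end-start+1)
        for i := start; i <= end; i++ {
            out = append(out, fmt.Sprintf("%s%d%s", prefix, i, suffix))
        }
        return out
    }

    func main() {
        // '/mnt/disk{1...4}/' expands to /mnt/disk1/ ... /mnt/disk4/
        for _, p := range expandEllipsis("/mnt/disk", 1, 4, "/") {
            fmt.Println(p)
        }
    }
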
cmd/fmt-gen.go
    deploymentID := ctxt.String("deployment-id")
    err := buildServerCtxt(ctxt, &globalServerCtxt)
    if err != nil {
        log.Fatalln(err)
    }
    handleCommonArgs(globalServerCtxt)
    pools, _, err := createServerEndpoints(globalMinioAddr,
        globalServerCtxt.Layout.pools, globalServerCtxt.Layout.legacy)
    if err != nil {
        log.Fatalln(err)
    }
    zipFile, err := os.Create("format.json.zip")
    if err != nil {

cmd/testdata/config/invalid-disks.yaml
    version: v1
    address: ':9000'
    console-address: ':9001'
    certs-dir: '/home/user/.minio/certs/'
    pools: # Specify the nodes and drives with pools
      - - 'https://server-example-pool1:9000/mnt/disk1/'
        - 'https://server1-pool1:9000/mnt/disk{1...4}/'
        - 'https://server3-pool1:9000/mnt/disk{1...4}/'
        - 'https://server4-pool1:9000/mnt/disk{1...4}/'
      - - 'https://server-example-pool2:9000/mnt/disk{1...4}/'

docs/distributed/distributed-from-config-file.sh
    consolePort="$((s3Port + 1000))"
    cat <<EOF >/tmp/minio.configfile.$i
    version: v1
    address: ':${s3Port}'
    console-address: ':${consolePort}'
    rootUser: 'minr0otUS2r'
    rootPassword: 'pBU94AGAY85e'
    pools: # Specify the nodes and drives with pools
      - - 'http://localhost:9001/tmp/xl/node9001/mnt/disk{1...4}/'
        - 'http://localhost:9002/tmp/xl/node9002/mnt/disk{1,2,3,4}/'
      - - 'http://localhost:9003/tmp/xl/node9003/mnt/disk{1...4}/'

cmd/metrics-v3-cluster-erasure-set.go
    const (
        poolIDL = "pool_id"
        setIDL  = "set_id"
    )

    var (
        erasureSetOverallWriteQuorumMD = NewGaugeMD(erasureSetOverallWriteQuorum,
            "Overall write quorum across pools and sets")
        erasureSetOverallHealthMD = NewGaugeMD(erasureSetOverallHealth,
            "Overall health across pools and sets (1=healthy, 0=unhealthy)")
        erasureSetReadQuorumMD = NewGaugeMD(erasureSetReadQuorum,
            "Read quorum for the erasure set in a pool", poolIDL, setIDL)

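NewGaugeMD above is MinIO's internal metric-descriptor helper; the trailing poolIDL and setIDL arguments declare pool_id/set_id labels, so each erasure set in each pool gets its own time series. The same labeled-gauge pattern, sketched with the Prometheus Go client purely as an illustration (the metric name below is an assumption, not MinIO's exported name):

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        // A gauge carrying pool_id/set_id labels, analogous to
        // erasureSetReadQuorumMD above; illustrative only.
        readQuorum := prometheus.NewGaugeVec(prometheus.GaugeOpts{
            Name: "minio_cluster_erasure_set_read_quorum",
            Help: "Read quorum for the erasure set in a pool",
        }, []string{"pool_id", "set_id"})
        prometheus.MustRegister(readQuorum)

        // One time series per (pool, set) pair.
        readQuorum.WithLabelValues("0", "3").Set(6)
        fmt.Println("gauge registered with pool_id/set_id labels")
    }
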
.github/workflows/run-mint.sh
    docker volume rm $(docker volume ls -q -f dangling=true) || true

    # Stop two nodes, one of each pool, to check that all S3 calls work while quorum is still there
    [ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio2
    [ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio6

    # Pause one node, to check that all S3 calls work while one node goes wrong

cmd/server-main.go
    if err != nil {
        return err
    }
    setDriveCount := uint64(v)

    var pools []poolArgs
    switch cv.Version {
    case "v1":
        cfV1 := config.ServerConfigV1{}
        if err = yaml.Unmarshal(rd, &cfV1); err != nil {
            return err
        }
        pools = make([]poolArgs, 0, len(cfV1.Pools))
        for _, list := range cfV1.Pools {
            pools = append(pools, poolArgs{
                args:          list,
                setDriveCount: setDriveCount,
            })

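The v1 branch above unmarshals the YAML config and turns each entry of cfV1.Pools into a poolArgs carrying a shared set-drive count. A self-contained sketch of that unmarshal step, with a simplified stand-in for config.ServerConfigV1 (serverConfigV1 models only the version and pools fields; it is not MinIO's exact type):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    // serverConfigV1 is a simplified stand-in for MinIO's
    // config.ServerConfigV1; only version and pools are modeled.
    type serverConfigV1 struct {
        Version string     `yaml:"version"`
        Pools   [][]string `yaml:"pools"`
    }

    const doc = `
    version: v1
    pools:
      - - 'https://server1-pool1:9000/mnt/disk{1...4}/'
      - - 'https://server-example-pool2:9000/mnt/disk{1...4}/'
    `

    func main() {
        var cfg serverConfigV1
        if err := yaml.Unmarshal([]byte(doc), &cfg); err != nil {
            panic(err)
        }
        // Each top-level list entry becomes one pool's endpoint arguments.
        for i, pool := range cfg.Pools {
            fmt.Printf("pool %d: %v\n", i, pool)
        }
    }
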
schema/pool.go
    package schema

    import (
        "reflect"
        "sync"
    )

    // sync pools
    var (
        normalPool sync.Map

        poolInitializer = func(reflectType reflect.Type) FieldNewValuePool {
            v, _ := normalPool.LoadOrStore(reflectType, &sync.Pool{
                New: func() interface{} { return reflect.New(reflectType).Interface() },
            })
            return v.(FieldNewValuePool)
        }

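poolInitializer above lazily creates one sync.Pool per reflect.Type; sync.Map's LoadOrStore guarantees that concurrent callers converge on a single shared pool for each type. A self-contained sketch of the same pattern outside gorm's schema package (poolFor and user are illustrative names):

    package main

    import (
        "fmt"
        "reflect"
        "sync"
    )

    var pools sync.Map // reflect.Type -> *sync.Pool

    // poolFor returns the shared pool for t, creating it on first use.
    func poolFor(t reflect.Type) *sync.Pool {
        v, _ := pools.LoadOrStore(t, &sync.Pool{
            New: func() interface{} { return reflect.New(t).Interface() },
        })
        return v.(*sync.Pool)
    }

    type user struct{ Name string }

    func main() {
        p := poolFor(reflect.TypeOf(user{}))
        u := p.Get().(*user) // a fresh or recycled *user
        u.Name = "alice"
        fmt.Println(u.Name)
        p.Put(u) // return the value for reuse
    }
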
cmd/endpoint.go