Results 61 - 70 of 561 for drives
cmd/testdata/xl-meta-merge.zip
deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.

### Homebrew (recommended)

Run the following command to install the latest stable MinIO package using [Homebrew](https://brew.sh/). Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.

```sh
brew install minio/stable/minio...
```
Last Modified: Fri Mar 08 17:50:48 UTC 2024 - 30.2K bytes
cmd/typed-errors.go
// first server to initialize them in distributed set to initialize them.
var errNotFirstDisk = errors.New("Not first drive")

// error returned by first disk waiting to initialize other servers.
var errFirstDiskWait = errors.New("Waiting on other drives")

// error returned for a negative actual size.
var errInvalidDecompressedSize = errors.New("Invalid Decompressed Size")
Last Modified: Tue May 28 17:14:16 UTC 2024 - 5.8K bytes
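These are package-level sentinel errors: callers compare against the error values with errors.Is rather than matching message strings. A minimal, self-contained sketch of that pattern — the excerpt doesn't show the coordination logic, so initFormat and the retry loop below are assumptions for illustration only:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// Sentinel errors, mirroring the style of cmd/typed-errors.go.
var errNotFirstDisk = errors.New("Not first drive")
var errFirstDiskWait = errors.New("Waiting on other drives")

// initFormat is a hypothetical stand-in for the real initialization
// call; it exists only to demonstrate the comparison pattern.
func initFormat(attempt int) error {
	if attempt < 2 {
		return errFirstDiskWait
	}
	return nil
}

func main() {
	for attempt := 0; ; attempt++ {
		err := initFormat(attempt)
		switch {
		case err == nil:
			fmt.Println("initialized")
			return
		case errors.Is(err, errFirstDiskWait):
			fmt.Println("waiting on other servers, retrying...")
			time.Sleep(10 * time.Millisecond)
		case errors.Is(err, errNotFirstDisk):
			fmt.Println("not the first drive; another server leads initialization")
			return
		default:
			fmt.Println("fatal:", err)
			return
		}
	}
}
```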
Makefile
@(env bash $(PWD)/buildscripts/heal-inconsistent-versions.sh)

verify-healing-with-root-disks: install-race ## verify healing root disks
	@echo "Verify healing with root drives"
	@(env bash $(PWD)/buildscripts/verify-healing-with-root-disks.sh)

verify-healing-with-rewrite: install-race ## verify healing to rewrite old xl.meta -> new xl.meta
	@echo "Verify healing with rewrite"
Last Modified: Thu Oct 31 22:10:24 UTC 2024 - 11.1K bytes
cmd/server-main.go
EXAMPLES:
1. Start MinIO server on "/home/shared" directory.
   {{.Prompt}} {{.HelpName}} /home/shared

2. Start single node server with 64 local drives "/mnt/data1" to "/mnt/data64".
   {{.Prompt}} {{.HelpName}} /mnt/data{1...64}

3. Start distributed MinIO server on a 32 node setup with 32 drives each; run the following command on all the nodes:
   {{.Prompt}} {{.HelpName}} http://node{1...32}.example.com/mnt/export{1...32}
Last Modified: Tue Sep 24 21:50:11 UTC 2024 - 35.2K bytes
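The {1...64} and {1...32} tokens are MinIO's ellipses notation for expanding a numeric range of drive paths or hostnames. A rough sketch of what such an expansion produces — expandEllipses is a hypothetical, simplified helper, not MinIO's actual parser (which also handles multiple tokens per argument, zero padding, and validation):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// ellipsesRe matches a single {start...end} range token.
var ellipsesRe = regexp.MustCompile(`\{(\d+)\.\.\.(\d+)\}`)

// expandEllipses expands one {a...b} token into the full list of
// arguments it denotes.
func expandEllipses(arg string) []string {
	m := ellipsesRe.FindStringSubmatchIndex(arg)
	if m == nil {
		return []string{arg}
	}
	start, _ := strconv.Atoi(arg[m[2]:m[3]])
	end, _ := strconv.Atoi(arg[m[4]:m[5]])
	var out []string
	for i := start; i <= end; i++ {
		out = append(out, arg[:m[0]]+strconv.Itoa(i)+arg[m[1]:])
	}
	return out
}

func main() {
	fmt.Println(expandEllipses("/mnt/data{1...4}"))
	// Output: [/mnt/data1 /mnt/data2 /mnt/data3 /mnt/data4]
}
```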
cmd/erasure-decode.go
return nil, fmt.Errorf("%w (offline-disks=%d/%d)", errErasureReadQuorum, disksNotFound, len(p.readers))
}

// Decode reads from readers, reconstructs data if needed and writes the data to the writer.
// A set of preferred drives can be supplied. In that case they will be used and the data reconstructed.
Last Modified: Thu Aug 29 01:40:52 UTC 2024 - 9.5K bytes
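Decode's job — rebuilding the original data when some drives are offline — is standard Reed-Solomon reconstruction. A minimal sketch using github.com/klauspost/reedsolomon, the erasure-coding library MinIO builds on; the 4+2 shard layout is an arbitrary choice for illustration:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// 4 data shards + 2 parity shards: any 2 shards may be lost.
	enc, err := reedsolomon.New(4, 2)
	if err != nil {
		log.Fatal(err)
	}

	data := bytes.Repeat([]byte("minio-object-data"), 100)
	shards, err := enc.Split(data)
	if err != nil {
		log.Fatal(err)
	}
	if err := enc.Encode(shards); err != nil {
		log.Fatal(err)
	}

	// Simulate two offline drives by dropping their shards.
	shards[1], shards[4] = nil, nil

	// Reconstruct fills the missing shards from the survivors.
	if err := enc.Reconstruct(shards); err != nil {
		log.Fatal(err)
	}
	ok, err := enc.Verify(shards)
	fmt.Println("verified after reconstruction:", ok, err)
}
```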
cmd/metrics-resource.go
readsPerSec:    "Reads per second on a drive",
writesPerSec:   "Writes per second on a drive",
readsKBPerSec:  "Kilobytes read per second on a drive",
writesKBPerSec: "Kilobytes written per second on a drive",
readsAwait:     "Average time for read requests to be served on a drive",
writesAwait:    "Average time for write requests to be served on a drive",
Last Modified: Wed Jul 24 23:30:33 UTC 2024 - 17.2K bytes
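Metrics like readsPerSec and readsAwait are rates derived from cumulative per-drive counters sampled at two points in time: reads/sec = Δreads / Δt, and await = Δtime-spent-reading / Δreads. A sketch of that derivation — the driveSample struct and its field names are assumptions for illustration, not MinIO's types:

```go
package main

import (
	"fmt"
	"time"
)

// driveSample is a hypothetical snapshot of cumulative per-drive
// counters (the kind exposed by /proc/diskstats on Linux).
type driveSample struct {
	reads     uint64        // completed read I/Os
	readBytes uint64        // bytes read
	readTime  time.Duration // total time spent serving reads
}

// rates derives per-second metrics from two samples taken dt apart.
func rates(prev, cur driveSample, dt time.Duration) (readsPerSec, kbPerSec, awaitMs float64) {
	dReads := float64(cur.reads - prev.reads)
	readsPerSec = dReads / dt.Seconds()
	kbPerSec = float64(cur.readBytes-prev.readBytes) / 1024 / dt.Seconds()
	if dReads > 0 {
		// Average time each read waited, in milliseconds.
		awaitMs = (cur.readTime - prev.readTime).Seconds() * 1000 / dReads
	}
	return
}

func main() {
	prev := driveSample{reads: 1000, readBytes: 4 << 20, readTime: 2 * time.Second}
	cur := driveSample{reads: 1600, readBytes: 10 << 20, readTime: 2400 * time.Millisecond}
	r, kb, aw := rates(prev, cur, time.Second)
	fmt.Printf("reads/s=%.0f KB/s=%.0f await=%.2fms\n", r, kb, aw)
}
```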
cmd/erasure-object_test.go
if err != nil {
	t.Fatal(err)
}
// Object was uploaded with 4 known bad drives, so we should be able to lose 3 more drives and still write to the object.
erasureDisks = xl.getDisks()
z.serverPools[0].erasureDisksMu.Lock()
xl.getDisks = func() []StorageAPI {
	erasureDisks[7] = nil
	erasureDisks[8] = nil
Last Modified: Tue Jan 30 20:43:25 UTC 2024 - 36.8K bytes
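The test's arithmetic follows from write quorum: per MinIO's documented quorum rules, a write needs at least N − parity drives online (one more when parity equals half the set). The exact parity this test configures isn't visible in the excerpt, so the numbers below are a toy check with a hypothetical helper, not the test's actual values:

```go
package main

import "fmt"

// canWrite illustrates erasure write quorum: a write succeeds while
// at least total-parity drives are online (quorum is bumped by one
// when parity == total/2, so two halves can't diverge).
func canWrite(total, parity, offline int) bool {
	writeQuorum := total - parity
	if parity == total/2 {
		writeQuorum++
	}
	return total-offline >= writeQuorum
}

func main() {
	// e.g. a 16-drive set with parity 7: 4 bad drives at upload
	// plus 3 more lost still leaves 9 online, meeting the quorum.
	fmt.Println(canWrite(16, 7, 4+3)) // true
	fmt.Println(canWrite(16, 7, 8))   // false
}
```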
cmd/erasure-metadata_test.go
for i := range N {
	fi.Erasure.Index = i + 1
	metaArr[i] = fi
	parities[i] = parity
	if i < agree {
		continue
	}
	metaArr[i].Erasure.Index = 0 // creates invalid fi on remaining drives
	parities[i] = -1             // invalid fi are assigned parity -1
}
res.metaArr = metaArr
res.parities = parities
res.errs = make([]error, N)
if agree >= N-parity {
	res.parity = parity
Last Modified: Thu Jul 25 21:02:50 UTC 2024 - 13.5K bytes
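What this fixture exercises is picking the parity value that a quorum of drives agrees on, with damaged metadata voting −1 so it can never win. A minimal sketch of such a vote — commonParity is a hypothetical stand-in for the function under test, not MinIO's implementation:

```go
package main

import "fmt"

// commonParity returns the parity value reported by at least quorum
// drives, or -1 when no value reaches agreement. Invalid metadata
// votes -1 and is excluded from the tally.
func commonParity(parities []int, quorum int) int {
	counts := map[int]int{}
	for _, p := range parities {
		if p >= 0 {
			counts[p]++
		}
	}
	for p, n := range counts {
		if n >= quorum {
			return p
		}
	}
	return -1
}

func main() {
	// 5 of 8 drives agree on parity 2; 3 have invalid metadata.
	parities := []int{2, 2, 2, 2, 2, -1, -1, -1}
	fmt.Println(commonParity(parities, 5)) // 2
	fmt.Println(commonParity(parities, 6)) // -1
}
```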
helm/minio/values.yaml
## If left empty, it defaults to the value of {{ .Values.mountPath }}
## If defined, it must be a sub-directory of the path specified in {{ .Values.mountPath }}
##
bucketRoot: ""

# Number of drives attached to a node
drivesPerNode: 1
# Number of MinIO containers running
replicas: 16
# Number of expanded MinIO clusters
pools: 1

## TLS Settings for MinIO
tls:
  enabled: false
Last Modified: Thu Oct 10 15:48:31 UTC 2024 - 18.8K bytes
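The deployment's total drive count is the product of these three values (pools × replicas × drivesPerNode), and it has to satisfy the erasure-coding minimum of 4 drives noted in the first result above. A quick worked check using the chart's defaults:

```go
package main

import "fmt"

func main() {
	// Defaults from helm/minio/values.yaml shown above.
	drivesPerNode, replicas, pools := 1, 16, 1

	totalDrives := drivesPerNode * replicas * pools
	fmt.Println("total drives:", totalDrives) // 16

	// Distributed MinIO needs at least 4 drives in total for
	// erasure coding (see the erasure-code overview above).
	fmt.Println("meets erasure minimum:", totalDrives >= 4) // true
}
```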
cmd/storage-rest-server.go
storageLogOnceIf(GlobalContext, fmt.Errorf("Drive is not writable %s, %s", endpoint, hint), "log-fatal-errs")
} else {
	logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint("%s", hint), "Unable to initialize backend")
}
case errors.Is(err, errFaultyDisk):
	if !exit {
		storageLogOnceIf(GlobalContext, fmt.Errorf("Drive is faulty at %s, please replace the drive - drive will be offline", endpoint), "log-fatal-errs")
Last Modified: Wed Aug 14 17:11:51 UTC 2024 - 45.7K bytes
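storageLogOnceIf suppresses repeats so a persistently faulty drive doesn't flood the log. Its implementation isn't shown in the excerpt, so here is a minimal, assumed sketch of a once-per-key logger in the same spirit:

```go
package main

import (
	"fmt"
	"sync"
)

// onceLogger logs each distinct key at most once; repeat errors for
// the same subsystem/drive are dropped. This is an illustrative
// sketch, not MinIO's actual storageLogOnceIf.
type onceLogger struct {
	mu   sync.Mutex
	seen map[string]struct{}
}

func (l *onceLogger) logOnceIf(err error, key string) {
	if err == nil {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if _, ok := l.seen[key]; ok {
		return // already reported for this key
	}
	l.seen[key] = struct{}{}
	fmt.Printf("ERROR [%s]: %v\n", key, err)
}

func main() {
	l := &onceLogger{seen: map[string]struct{}{}}
	err := fmt.Errorf("Drive is faulty at /mnt/disk3, please replace the drive")
	l.logOnceIf(err, "log-fatal-errs")
	l.logOnceIf(err, "log-fatal-errs") // suppressed
}
```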