Results 151 - 160 of 421 for driver (0.07 sec)
docs/logging/README.md
Additionally, in the case of an erasure-coded setup, `tags.objectLocation` provides per-object details about:

- The pool number the object operation was performed on.
- The set number the object operation was performed on.
- The list of drives participating in this operation, which belong to the set.

```json
{
  "version": "1",
  "deploymentid": "90e81272-45d9-4fe8-9c45-c9a7322bf4b5",
  "time": "2024-05-09T07:38:10.449688982Z",
  "event": "",
```
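For a consumer of these logs, a minimal Go sketch of the fields described above might look like the following. Only the fields visible in the excerpt are grounded; the `ObjectLocation` struct shape and its field names are assumptions based on the prose, not MinIO's actual types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// auditEntry is an assumed shape for the audit fragment shown above.
// The objectLocation fields are guesses from the bullet list, labeled
// here as illustrative only.
type auditEntry struct {
	Version      string `json:"version"`
	DeploymentID string `json:"deploymentid"`
	Time         string `json:"time"`
	Event        string `json:"event"`
	Tags         struct {
		ObjectLocation struct {
			Pool   int      `json:"pool"`
			Set    int      `json:"set"`
			Drives []string `json:"drives"`
		} `json:"objectLocation"`
	} `json:"tags"`
}

func main() {
	line := `{"version":"1","deploymentid":"90e81272-45d9-4fe8-9c45-c9a7322bf4b5","time":"2024-05-09T07:38:10.449688982Z","event":""}`
	var e auditEntry
	if err := json.Unmarshal([]byte(line), &e); err != nil {
		panic(err)
	}
	fmt.Println(e.Version, e.Time)
}
```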
cmd/background-newdisks-heal-ops.go
```go
			h.QueuedBuckets = append(h.QueuedBuckets, b.Name)
		}
	}
}

func (h *healingTracker) printTo(writer io.Writer) {
	h.mu.RLock()
	defer h.mu.RUnlock()
	b, err := json.MarshalIndent(h, "", " ")
	if err != nil {
		writer.Write([]byte(err.Error()))
		return
	}
	writer.Write(b)
}

// toHealingDisk converts the information to madmin.HealingDisk
```
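`printTo` illustrates a common pattern: snapshot state under a read lock and emit it as indented JSON to any `io.Writer`. A self-contained sketch of the same pattern, with illustrative types rather than MinIO's:

```go
package main

import (
	"encoding/json"
	"io"
	"os"
	"sync"
)

// tracker mimics the pattern above: guard state with an RWMutex and
// serialize it as indented JSON. The unexported mutex is skipped by
// encoding/json automatically.
type tracker struct {
	mu     sync.RWMutex
	Drives []string `json:"drives"`
}

func (t *tracker) printTo(w io.Writer) {
	t.mu.RLock()
	defer t.mu.RUnlock()
	b, err := json.MarshalIndent(t, "", "  ")
	if err != nil {
		w.Write([]byte(err.Error()))
		return
	}
	w.Write(b)
}

func main() {
	t := &tracker{Drives: []string{"/mnt/drive1"}}
	t.printTo(os.Stdout)
}
```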
src/main/java/jcifs/dcerpc/ndr/NdrBuffer.java
```java
        this.length = 0;
        this.deferred = this;
    }

    /**
     * Creates a derived NdrBuffer at the specified index position.
     *
     * @param idx the index position for the derived buffer
     * @return the derived NdrBuffer
     */
    public NdrBuffer derive(final int idx) {
        final NdrBuffer nb = new NdrBuffer(this.buf, this.start);
        nb.index = idx;
```
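The `derive` call produces a view over the same backing array with an independent index, so writes through the view are visible to the parent. A rough Go analogue of that idea (not part of jcifs; purely illustrative):

```go
package main

import "fmt"

// ndrBuffer is a toy analogue of jcifs' NdrBuffer: derived buffers
// share the backing slice but keep their own index.
type ndrBuffer struct {
	buf   []byte
	start int
	index int
}

func (b *ndrBuffer) derive(idx int) *ndrBuffer {
	return &ndrBuffer{buf: b.buf, start: b.start, index: idx}
}

func main() {
	base := &ndrBuffer{buf: make([]byte, 16)}
	view := base.derive(8)
	view.buf[view.index] = 0xff // writes through to the shared backing array
	fmt.Println(base.buf[8])    // 255
}
```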
cmd/erasure-errors.go
import "errors" // errErasureReadQuorum - did not meet read quorum. var errErasureReadQuorum = errors.New("Read failed. Insufficient number of drives online") // errErasureWriteQuorum - did not meet write quorum. var errErasureWriteQuorum = errors.New("Write failed. Insufficient number of drives online") // errNoHealRequired - returned when healing is attempted on a previously healed disks.
buildscripts/verify-healing-empty-erasure-set.sh
```sh
timeout 15m /tmp/mc ready myminio || fail

# Wait for all drives to be online and formatted
while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].state | select(. != "ok")' | wc -l) -gt 0 ]; do
	sleep 1
done

# Wait for all drives to be healed
while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].healing | select(. != null) | select(. == true)' | wc -l) -gt 0 ]; do
	sleep 1
done
```
cmd/speedtest.go
```go
ch := make(chan madmin.SpeedTestResult, 1)
go func() {
	defer xioutil.SafeClose(ch)
	concurrency := opts.concurrencyStart

	if opts.autotune {
		// if we have less drives than concurrency then choose
		// only the concurrency to be number of drives to start
		// with - since default '32' might be big and may not
		// complete in total time of 10s.
		if globalEndpoints.NEndpoints() < concurrency {
```
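The comment describes a simple clamp: never start with more concurrent workers than there are endpoints. The same logic as a standalone sketch (the function name is mine, not MinIO's):

```go
package main

import "fmt"

// startingConcurrency caps the configured start value at the number of
// endpoints, mirroring the autotune comment in the excerpt above.
func startingConcurrency(configured, nEndpoints int) int {
	if nEndpoints < configured {
		return nEndpoints
	}
	return configured
}

func main() {
	fmt.Println(startingConcurrency(32, 4)) // 4: fewer endpoints than the default of 32
}
```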
cmd/config-current.go
```go
		}
	}
case config.DriveSubSys:
	driveConfig, err := drive.LookupConfig(s[config.DriveSubSys][config.Default])
	if err != nil {
		configLogIf(ctx, fmt.Errorf("Unable to load drive config: %w", err))
	} else {
		if err = globalDriveConfig.Update(driveConfig); err != nil {
			configLogIf(ctx, fmt.Errorf("Unable to update drive config: %v", err))
		}
	}
case config.BrowserSubSys:
```
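The pattern here is lookup-then-swap: parse the new subsystem config, log and keep the old one on failure, and only apply a value that validated. A generic sketch of that pattern with invented names, not MinIO's actual config types:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

type driveConfig struct{ MaxTimeout int }

// configHolder guards the live config so readers never observe a
// half-applied update.
type configHolder struct {
	mu  sync.Mutex
	cfg driveConfig
}

func (h *configHolder) Update(c driveConfig) error {
	if c.MaxTimeout < 0 {
		return errors.New("invalid max timeout")
	}
	h.mu.Lock()
	defer h.mu.Unlock()
	h.cfg = c
	return nil
}

func main() {
	var global configHolder
	// Validate first; only then replace the live config.
	if err := global.Update(driveConfig{MaxTimeout: 30}); err != nil {
		fmt.Println("Unable to update drive config:", err)
	}
}
```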
docs/throttle/README.md
If you have traditional spinning (HDD) drives, some applications with high concurrency might require the MinIO cluster to be tuned to avoid random I/O on the drives. The way to convert highly concurrent I/O into sequential I/O is to reduce the number of concurrent operations allowed per cluster. This makes the MinIO cluster operationally resilient to such workloads, while also keeping the drives at optimal efficiency and responsive.
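Conceptually, this kind of throttle is a counting semaphore in front of the drives: only a fixed number of operations proceed at once, and the rest queue behind them. A minimal Go sketch of the idea (not MinIO's implementation):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const maxConcurrent = 4 // illustrative cluster-wide cap on in-flight operations
	sem := make(chan struct{}, maxConcurrent)

	var wg sync.WaitGroup
	for i := 0; i < 16; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			sem <- struct{}{}        // block until a slot frees up
			defer func() { <-sem }() // release the slot when done
			fmt.Println("drive I/O for request", id)
		}(i)
	}
	wg.Wait()
}
```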
cmd/server-main_test.go
```go
ctx, cancel := context.WithCancel(t.Context())
defer cancel()

// Tests for ErasureSD object layer.
nDisks := 1
disks, err := getRandomDisks(nDisks)
if err != nil {
	t.Fatal("Failed to create drives for the backend")
}
defer removeRoots(disks)

obj, err := newObjectLayer(ctx, mustGetPoolEndpoints(0, disks...))
if err != nil {
	t.Fatal("Unexpected object layer initialization error", err)
}
```
cmd/peer-s3-client.go
```go
	SetCount: -1, // explicitly set an invalid value -1, for bucket heal scenario
	}

	for i, err := range errs {
		if err == nil {
			res.Before.Drives = append(res.Before.Drives, healBucketResults[i].Before.Drives...)
			res.After.Drives = append(res.After.Drives, healBucketResults[i].After.Drives...)
		}
	}
	return res, nil
}

// ListBuckets lists buckets across all nodes and returns a consistent view:
```
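The loop above merges per-node heal results while skipping nodes that returned an error. The same gather pattern in isolation, with types invented for the sketch:

```go
package main

import (
	"errors"
	"fmt"
)

type healResult struct{ Drives []string }

func main() {
	// One result and one error slot per peer, as in the excerpt above.
	results := []healResult{{Drives: []string{"d1"}}, {Drives: []string{"d2"}}}
	errs := []error{nil, errors.New("peer offline")}

	var merged healResult
	for i, err := range errs {
		if err != nil {
			continue // skip peers that failed; their results are not trusted
		}
		merged.Drives = append(merged.Drives, results[i].Drives...)
	}
	fmt.Println(merged.Drives) // [d1]
}
```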