Results 51 - 60 of 112 for quorum (0.2 sec)
- internal/dsync/dsync_test.go
  case <-ctx.Done():
      t.Fatal("Lock context canceled which is not expected")
  case <-timer.C:
  }
  // Should be safe operation in all cases
  dm.Unlock(t.Context())
  }

  // Test canceling context while quorum servers report lock not found
  func TestFailedRefreshLock(t *testing.T) {
      if testing.Short() {
          t.Skip("skipping test in short mode.")
      }
      // Simulate Refresh response to return no locking found
  Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 10.8K bytes
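The test above exercises dsync's refresh behavior: once a quorum of lock servers no longer knows about a held lock, the lock's context should be canceled. A minimal, hypothetical sketch of that pattern follows; monitorLock, refreshFn, and the majority check are illustrative stand-ins, not dsync's actual API.

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // refreshFn reports how many lock servers still acknowledge the lock.
    type refreshFn func(ctx context.Context) (held, total int)

    // monitorLock polls refresh and cancels the lock's context once fewer than
    // a simple majority of servers still hold it (i.e. refresh quorum is lost).
    func monitorLock(ctx context.Context, refresh refreshFn, cancelLock context.CancelFunc, every time.Duration) {
        t := time.NewTicker(every)
        defer t.Stop()
        for {
            select {
            case <-ctx.Done():
                return
            case <-t.C:
                held, total := refresh(ctx)
                if held < total/2+1 { // lost refresh quorum
                    cancelLock()
                    return
                }
            }
        }
    }

    func main() {
        lockCtx, cancelLock := context.WithCancel(context.Background())
        // Simulated refresh: no server knows the lock, so quorum is lost immediately.
        go monitorLock(lockCtx, func(context.Context) (int, int) { return 0, 3 }, cancelLock, 10*time.Millisecond)
        <-lockCtx.Done()
        fmt.Println("lock context canceled after losing refresh quorum")
    }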
- cmd/erasure-sets.go
  - Any unrecognized disks - return failure

  Some disks are offline and we have quorum.
  -----------------
  - Some unformatted - format all and return success,
    treat disks offline as corrupted.
  - Any JBOD inconsistent - return failure
  - Some are corrupt (missing format.json)
  - Any unrecognized disks - return failure

  No read quorum
  -----------------
  failure for all cases.
  Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 37K bytes
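The comment above is a decision table for bringing an erasure set online. Below is a loose, hypothetical paraphrase of it in code, assuming made-up driveState and action types; MinIO's real format handling is more involved and lives elsewhere.

    package main

    import "fmt"

    type driveState int

    const (
        formatted driveState = iota
        unformatted
        offline
        corrupted    // missing format.json
        unrecognized // foreign or JBOD-inconsistent drive
    )

    type action string

    func decide(states []driveState) action {
        counts := map[driveState]int{}
        for _, s := range states {
            counts[s]++
        }
        readQuorum := len(states)/2 + 1 // simple majority of drives must respond
        if len(states)-counts[offline] < readQuorum {
            return "fail: no read quorum"
        }
        if counts[unrecognized] > 0 || counts[corrupted] > 0 {
            return "fail: unrecognized or corrupted drives"
        }
        if counts[unformatted] > 0 {
            return "format all fresh drives, treat offline drives as corrupted"
        }
        return "ready"
    }

    func main() {
        fmt.Println(decide([]driveState{formatted, formatted, formatted, unformatted, offline, formatted}))
    }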
- Makefile
  test-multipart: install-race ## test multipart
        @echo "Test multipart behavior when part files are missing"
        @(env bash $(PWD)/buildscripts/multipart-quorum-test.sh)

  test-timeout: install-race ## test multipart
        @echo "Test server timeout"
        @(env bash $(PWD)/buildscripts/test-timeout.sh)

  verify: install-race ## verify minio various setups
  Last Modified: Sun Apr 27 00:44:22 UTC 2025 - 11.2K bytes
- cmd/global-heal.go
  reqInfo := &logger.ReqInfo{API: "BackgroundHeal"}
  ctx, cancelCtx := context.WithCancel(logger.SetReqInfo(GlobalContext, reqInfo))

  hs := madmin.HealOpts{
      // Remove objects that do not have read-quorum
      Remove: healDeleteDangling,
  }

  return &healSequence{
      startTime:   UTCNow(),
      clientToken: bgHealingUUID,
      // run-background heal with reserved bucket
      bucket:   minioReservedBucket,
      settings: hs,

  Last Modified: Fri Apr 04 13:49:12 UTC 2025 - 16.2K bytes
- cmd/erasure-server-pool-rebalance.go
  listingQuorum := (set.setDriveCount + 1) / 2

  // How to resolve partial results.
  resolver := metadataResolutionParams{
      dirQuorum: listingQuorum, // make sure to capture all quorum ratios
      objQuorum: listingQuorum, // make sure to capture all quorum ratios
      bucket:    bucketName,
  }

  err := listPathRaw(ctx, listPathRawOptions{
      disks:     disks,
      bucket:    bucketName,
      recursive: true,

  Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 28.7K bytes
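Both this rebalance lister and the decommission lister further down (cmd/erasure-server-pool-decom.go) derive the same listing quorum from the set's drive count: a simple majority via integer division. A quick worked example of that arithmetic:

    package main

    import "fmt"

    func main() {
        // listingQuorum is a simple majority of the drives in an erasure set,
        // mirroring the (setDriveCount + 1) / 2 expression in the snippet above.
        for _, setDriveCount := range []int{4, 5, 12, 16} {
            listingQuorum := (setDriveCount + 1) / 2
            fmt.Printf("drives=%2d -> listingQuorum=%d\n", setDriveCount, listingQuorum)
        }
    }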
- docs/metrics/prometheus/grafana/minio-dashboard.json
"fullMetaSearch": false, "hide": false, "includeNullMetadata": true, "instant": false, "legendFormat": "Pool {{pool}} / Set {{set}} - Read Quorum", "range": true, "refId": "B", "useBackend": false }, { "datasource": { "type": "prometheus",
  Last Modified: Mon Aug 04 01:46:49 UTC 2025 - 93.1K bytes
- cmd/metacache-server-pool.go
              if err == nil {
                  allAtEOF = false
              }
              errs[i] = err
          }(len(errs), set)
          errs = append(errs, nil)
      }
  }
  mu.Unlock()

  // Gather results to a single channel.
  // Quorum is one since we are merging across sets.
  err := mergeEntryChannels(ctx, inputs, results, 1)
  cancelList()
  wg.Wait()

  // we should return 'errs' from per disk
  if isAllNotFound(errs) {

  Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 12.9K bytes
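The comment explains why a quorum of one suffices at this level: each input being merged here is already the output of a single erasure set's own listing (which applies its own per-drive quorums), and an object belongs to exactly one set, so any single set reporting an entry is enough. A hypothetical sketch of quorum-based merging; mergeListings is an illustrative helper, not mergeEntryChannels' real signature.

    package main

    import (
        "fmt"
        "sort"
    )

    // mergeListings keeps a name once at least `quorum` inputs have reported it.
    // With quorum == 1, any single listing contributing an entry is enough.
    func mergeListings(quorum int, listings ...[]string) []string {
        seen := map[string]int{}
        for _, listing := range listings {
            for _, name := range listing {
                seen[name]++
            }
        }
        out := make([]string, 0, len(seen))
        for name, n := range seen {
            if n >= quorum {
                out = append(out, name)
            }
        }
        sort.Strings(out)
        return out
    }

    func main() {
        set0 := []string{"photos/a.jpg", "photos/c.jpg"}
        set1 := []string{"photos/b.jpg"}
        fmt.Println(mergeListings(1, set0, set1)) // [photos/a.jpg photos/b.jpg photos/c.jpg]
    }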
- cmd/erasure-server-pool-decom.go
  listingQuorum := (set.setDriveCount + 1) / 2

  // How to resolve partial results.
  resolver := metadataResolutionParams{
      dirQuorum: listingQuorum, // make sure to capture all quorum ratios
      objQuorum: listingQuorum, // make sure to capture all quorum ratios
      bucket:    bi.Name,
  }

  err := listPathRaw(ctx, listPathRawOptions{
      disks:  disks,
      bucket: bi.Name,
      path:   bi.Prefix,

  Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 42.2K bytes
- cmd/admin-handlers.go
  entry := &madmin.LockEntry{
      Timestamp:  t,
      Elapsed:    now.Sub(t),
      Resource:   resource,
      ServerList: []string{server},
      Source:     l.Source,
      Owner:      l.Owner,
      ID:         l.UID,
      Quorum:     l.Quorum,
  }
  if l.Writer {
      entry.Type = "WRITE"
  } else {
      entry.Type = "READ"
  }
  return entry
  }

  func topLockEntries(peerLocks []*PeerLocks, stale bool) madmin.LockEntries {

  Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 99.7K bytes
- cmd/metrics-v2.go
  })
  return mg
  }

  func getClusterWriteQuorumMD() MetricDescription {
      return MetricDescription{
          Namespace: clusterMetricNamespace,
          Subsystem: "write",
          Name:      "quorum",
          Help:      "Maximum write quorum across all pools and sets",
          Type:      gaugeMetric,
      }
  }

  func getClusterHealthStatusMD() MetricDescription {
      return MetricDescription{
          Namespace: clusterMetricNamespace,
  Last Modified: Sun Sep 28 20:59:21 UTC 2025 - 133.6K bytes
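getClusterWriteQuorumMD only describes the metric; the value is collected elsewhere and exposed under the cluster namespace (e.g. minio_cluster_write_quorum). For reference, a minimal sketch of publishing a gauge with the same namespace/subsystem/name split using the standard Prometheus Go client; the value set here is illustrative, not MinIO's actual collection path.

    package main

    import (
        "log"
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        // Hypothetical gauge mirroring the naming in the snippet above;
        // Prometheus joins the parts as minio_cluster_write_quorum.
        writeQuorum := prometheus.NewGauge(prometheus.GaugeOpts{
            Namespace: "minio_cluster",
            Subsystem: "write",
            Name:      "quorum",
            Help:      "Maximum write quorum across all pools and sets",
        })
        prometheus.MustRegister(writeQuorum)

        writeQuorum.Set(9) // illustrative value only; MinIO computes this per pool/set

        http.Handle("/metrics", promhttp.Handler())
        log.Fatal(http.ListenAndServe(":2112", nil))
    }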