Results 11 - 20 of 171 for Groves (0.16 sec)

  1. cmd/erasure-object.go

    			if disk == nil || !disk.IsOnline() {
    				parityDrives++
    				offlineDrives++
    				continue
    			}
    		}
    
    		if offlineDrives >= (len(storageDisks)+1)/2 {
    			// if offline drives are more than 50% of the drives
    			// we have no quorum, we shouldn't proceed just
    			// fail at that point.
    			return ObjectInfo{}, toObjectErr(errErasureWriteQuorum, bucket, object)
    		}
    
    Go
    - Last Modified: Sun May 05 16:56:21 GMT 2024
    - 77.2K bytes
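
    The check in this excerpt refuses the write as soon as offline drives reach half of the erasure set, since no write quorum is possible at that point. A minimal standalone sketch of that majority arithmetic, using hypothetical names rather than MinIO's actual types:

        package main

        import (
        	"errors"
        	"fmt"
        )

        // errWriteQuorum stands in for MinIO's errErasureWriteQuorum.
        var errWriteQuorum = errors.New("write quorum not met")

        // checkWriteQuorum mirrors the excerpt: once offline drives reach
        // ceil(total/2), roughly half the set is unusable and the write is
        // rejected up front instead of failing later.
        func checkWriteQuorum(totalDrives, offlineDrives int) error {
        	if offlineDrives >= (totalDrives+1)/2 {
        		return errWriteQuorum
        	}
        	return nil
        }

        func main() {
        	fmt.Println(checkWriteQuorum(8, 3)) // <nil>, 5 of 8 drives still online
        	fmt.Println(checkWriteQuorum(8, 4)) // write quorum not met
        }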
  2. buildscripts/verify-healing.sh

    		echo "my content" | /tmp/mc pipe myminio/testbucket/file-$i
    	done
    }
    
    function perform_test() {
    	start_port=$2
    
    	start_minio_3_node 120 $start_port
    
    	echo "Testing Distributed Erasure setup healing of drives"
    	echo "Remove the contents of the disks belonging to '${1}' node"
    
    	rm -rf ${WORK_DIR}/${1}/*/
    
    	set -x
    	start_minio_3_node 120 $start_port
    
    	check_heal ${1}
    	rv=$?
    	if [ "$rv" == "1" ]; then
    Shell Script
    - Last Modified: Thu Apr 25 21:55:41 GMT 2024
    - 4.2K bytes
  3. docs/config/README.md

    In most setups this is sufficient to heal the content after drive replacements. Setting `max_sleep` to a *lower* value and setting `max_io` to a *higher* value would make heal go faster.
    
    Plain Text
    - Last Modified: Mon Sep 11 21:48:54 GMT 2023
    - 17.7K bytes
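
    Per the docs/config/README.md excerpt above, heal speed is traded against foreground I/O through the `max_sleep` and `max_io` keys of the heal subsystem. Assuming the standard `mc admin config` syntax and purely illustrative values, a faster heal would be requested along these lines:

        mc admin config set <ALIAS> heal max_sleep=1ms max_io=100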
  4. cmd/erasure-object_test.go

    	if err != nil {
    		t.Fatal(err)
    	}
    
    	// Object was uploaded with 4 known bad drives, so we should still be able to lose 3 drives and still write to the object.
    	erasureDisks = xl.getDisks()
    	z.serverPools[0].erasureDisksMu.Lock()
    	xl.getDisks = func() []StorageAPI {
    		erasureDisks[7] = nil
    		erasureDisks[8] = nil
    Go
    - Last Modified: Tue Jan 30 20:43:25 GMT 2024
    - 36.8K bytes
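
    The test comment above relies on how erasure coding tolerates missing shards: an object split into data and parity shards can be rebuilt from any data-sized subset. MinIO's actual read/write quorum rules add their own adjustments on top, so the sketch below only shows the underlying arithmetic with hypothetical names:

        package main

        import "fmt"

        // canReconstruct reports whether an erasure-coded object is still
        // recoverable: any dataShards of the dataShards+parityShards pieces
        // are enough to rebuild the original content.
        func canReconstruct(dataShards, parityShards, lostShards int) bool {
        	remaining := dataShards + parityShards - lostShards
        	return remaining >= dataShards
        }

        func main() {
        	// A 16-drive set split 8 data / 8 parity tolerates up to 8 lost shards.
        	fmt.Println(canReconstruct(8, 8, 7)) // true
        	fmt.Println(canReconstruct(8, 8, 9)) // false
        }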
  5. docs/metrics/prometheus/grafana/minio-dashboard.json

              "editorMode": "builder",
              "expr": "minio_cluster_health_erasure_set_online_drives{job=~\"$scrape_jobs\"}",
              "fullMetaSearch": false,
              "hide": false,
              "includeNullMetadata": true,
              "instant": false,
              "legendFormat": "Pool {{pool}} / Set {{set}} - Online Drives",
              "range": true,
              "refId": "A",
              "useBackend": false
            },
    Json
    - Last Modified: Mon Apr 15 10:03:01 GMT 2024
    - 93K bytes
  6. Makefile

    	@(env bash $(PWD)/buildscripts/verify-healing-empty-erasure-set.sh)
    	@(env bash $(PWD)/buildscripts/heal-inconsistent-versions.sh)
    
    verify-healing-with-root-disks: ## verify healing root disks
    	@echo "Verify healing with root drives"
    	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
    	@(env bash $(PWD)/buildscripts/verify-healing-with-root-disks.sh)
    
    Plain Text
    - Last Modified: Thu Apr 25 21:55:41 GMT 2024
    - 10.3K bytes
  7. cmd/object-api-errors.go

    func (e SlowDown) Error() string {
    	return "Please reduce your request rate"
    }
    
    // RQErrType reason for read quorum error.
    type RQErrType int
    
    const (
    	// RQInsufficientOnlineDrives - not enough online drives.
    	RQInsufficientOnlineDrives RQErrType = 1 << iota
    	// RQInconsistentMeta - inconsistent metadata.
    	RQInconsistentMeta
    )
    
    func (t RQErrType) String() string {
    	switch t {
    	case RQInsufficientOnlineDrives:
    Go
    - Last Modified: Sun May 05 16:56:21 GMT 2024
    - 21.3K bytes
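
    The String() method in this excerpt is cut off by the snippet boundary. A generic sketch of the same pattern follows, a reason type declared with `1 << iota` plus a human-readable String(); it is not the actual continuation of the MinIO file:

        package main

        import "fmt"

        // ReasonType mimics the shape of RQErrType: bit-flag values keep
        // distinct reasons distinguishable.
        type ReasonType int

        const (
        	ReasonInsufficientOnlineDrives ReasonType = 1 << iota
        	ReasonInconsistentMeta
        )

        // String renders a reason for logs and error messages.
        func (t ReasonType) String() string {
        	switch t {
        	case ReasonInsufficientOnlineDrives:
        		return "insufficient online drives"
        	case ReasonInconsistentMeta:
        		return "inconsistent metadata"
        	default:
        		return "unknown"
        	}
        }

        func main() {
        	fmt.Println(ReasonInsufficientOnlineDrives) // insufficient online drives
        }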
  8. helm/minio/values.yaml

    ## If left empty, it defaults to the value of {{ .Values.mountPath }}
    ## If defined, it must be a sub-directory of the path specified in {{ .Values.mountPath }}
    ##
    bucketRoot: ""
    
    # Number of drives attached to a node
    drivesPerNode: 1
    # Number of MinIO containers running
    replicas: 16
    # Number of expanded MinIO clusters
    pools: 1
    
    ## TLS Settings for MinIO
    tls:
      enabled: false
    Others
    - Last Modified: Sun Apr 28 10:14:37 GMT 2024
    - 18.4K bytes
  9. cmd/erasure-server-pool-rebalance.go

    func (set *erasureObjects) listObjectsToRebalance(ctx context.Context, bucketName string, fn func(entry metaCacheEntry)) error {
    	disks, _ := set.getOnlineDisksWithHealing(false)
    	if len(disks) == 0 {
    		return fmt.Errorf("no online drives found for set with endpoints %s", set.getEndpoints())
    	}
    
    	// However many we ask, versions must exist on ~50%
    	listingQuorum := (set.setDriveCount + 1) / 2
    
    	// How to resolve partial results.
    Go
    - Last Modified: Fri Apr 26 19:29:28 GMT 2024
    - 27.2K bytes
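
    The listing quorum in this excerpt is the same ceil(driveCount/2) figure seen in the cmd/erasure-object.go excerpt, here applied to reads of the namespace: an entry is only trusted if about half of the drives in the set report it. A small self-contained sketch with hypothetical types (not MinIO's metaCacheEntry resolver):

        package main

        import "fmt"

        // entriesMeetingQuorum keeps only names reported by at least quorum
        // drives, mirroring the "versions must exist on ~50%" rule above.
        func entriesMeetingQuorum(perDriveListings [][]string, quorum int) []string {
        	counts := make(map[string]int)
        	for _, listing := range perDriveListings {
        		for _, name := range listing {
        			counts[name]++
        		}
        	}
        	var agreed []string
        	for name, n := range counts {
        		if n >= quorum {
        			agreed = append(agreed, name)
        		}
        	}
        	return agreed
        }

        func main() {
        	driveCount := 4
        	quorum := (driveCount + 1) / 2 // 2 of 4 drives must agree
        	listings := [][]string{
        		{"a.txt", "b.txt"},
        		{"a.txt"},
        		{"a.txt", "c.txt"},
        		{}, // an offline or healing drive reports nothing
        	}
        	fmt.Println(entriesMeetingQuorum(listings, quorum)) // [a.txt]
        }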
  10. cmd/iam.go

    		case <-ctx.Done():
    			return
    		}
    	}
    }
    
    func (sys *IAMSys) validateAndAddRolePolicyMappings(ctx context.Context, m map[arn.ARN]string) {
    	// Validate that policies associated with roles are defined. If
    	// authZ plugin is set, role policies are just claims sent to
    	// the plugin and they need not exist.
    	//
    	// If some mapped policies do not exist, we print some error
    Go
    - Last Modified: Thu Apr 25 21:28:16 GMT 2024
    - 71.1K bytes
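
    The comment in this last excerpt describes the intended behaviour: skip validation entirely when an external authZ plugin owns policy decisions, otherwise warn about role mappings that reference undefined policies. A standalone sketch of that flow with hypothetical names (arn.ARN and the IAMSys internals are not reproduced):

        package main

        import (
        	"log"
        	"strings"
        )

        // validateRolePolicyMappings warns about role-to-policy mappings that
        // refer to policies which are not defined locally. When an external
        // authZ plugin is enabled, mapped names are just claims forwarded to
        // the plugin, so nothing is checked.
        func validateRolePolicyMappings(mappings map[string]string, defined map[string]bool, authZPluginEnabled bool) {
        	if authZPluginEnabled {
        		return
        	}
        	for roleARN, policyList := range mappings {
        		for _, policy := range strings.Split(policyList, ",") {
        			policy = strings.TrimSpace(policy)
        			if policy != "" && !defined[policy] {
        				log.Printf("policy %q mapped to role %s is not defined", policy, roleARN)
        			}
        		}
        	}
        }

        func main() {
        	defined := map[string]bool{"readwrite": true}
        	mappings := map[string]string{
        		"arn:minio:iam:::role/example": "readwrite, readonly",
        	}
        	validateRolePolicyMappings(mappings, defined, false) // warns about "readonly"
        }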