Results 1 - 4 of 4 for Scale (0.12 sec)

  1. cmd/speedtest.go

    			for _, result := range results {
    				totalPut += result.Uploads
    				totalGet += result.Downloads
    			}
    
    			if totalGet < throughputHighestGet {
    				// The following check covers situations
    				// where Writes() scale higher than Reads()
    				// - practically speaking this never happens
    				// and should never happen - however it has
    				// been seen recently when hardware issues
    				// cause Reads() to run slower than Writes().
    Go
    - Registered: Sun May 05 19:28:20 GMT 2024
    - Last Modified: Sun Jan 28 18:04:17 GMT 2024
    - 9K bytes
    - Viewed (0)
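    The excerpt sums per-node PUT and GET throughput and compares the aggregate
    against the best GET round seen so far. A minimal sketch of that aggregation,
    with the result type and field names assumed from the excerpt rather than
    taken from the actual minio definitions:

        package main

        import "fmt"

        // speedTestResult is a stand-in for the per-node result type in
        // the excerpt; only Uploads/Downloads mirror the fields used above.
        type speedTestResult struct {
        	Uploads   uint64 // PUT throughput reported by one node
        	Downloads uint64 // GET throughput reported by one node
        }

        func main() {
        	results := []speedTestResult{
        		{Uploads: 900, Downloads: 1100},
        		{Uploads: 950, Downloads: 1200},
        	}

        	var totalPut, totalGet uint64
        	for _, result := range results {
        		totalPut += result.Uploads
        		totalGet += result.Downloads
        	}

        	// throughputHighestGet would hold the best aggregate GET from
        	// earlier rounds; a lower totalGet means this round regressed.
        	throughputHighestGet := uint64(2500)
        	if totalGet < throughputHighestGet {
        		fmt.Printf("GET regressed: %d < %d (PUT total %d)\n",
        			totalGet, throughputHighestGet, totalPut)
        	}
        }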
  2. cmd/veeam-sos-api.go

    //     specific system recommendations based on current support case statistics and storage performance possibilities.
    //     Vendors might change the settings based on the configuration and scale-out of the solution (more storage nodes =>
    //     higher task limit).
    //
    //     <S3ConcurrentTaskLimit>
    //
    Go
    - Registered: Sun May 05 19:28:20 GMT 2024
    - Last Modified: Tue Jan 30 18:43:58 GMT 2024
    - 8.2K bytes
    - Viewed (2)
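    The excerpt documents the <S3ConcurrentTaskLimit> element from the Veeam SOS
    API and notes that vendors raise the limit as the solution scales out. A
    minimal sketch of emitting such an element with Go's encoding/xml; the
    wrapper element and the per-node multiplier are assumptions for
    illustration, not the actual document layout:

        package main

        import (
        	"encoding/xml"
        	"fmt"
        )

        // systemRecommendations is a hypothetical wrapper; only the
        // S3ConcurrentTaskLimit element name comes from the excerpt.
        type systemRecommendations struct {
        	XMLName               xml.Name `xml:"SystemRecommendations"`
        	S3ConcurrentTaskLimit int      `xml:"S3ConcurrentTaskLimit"`
        }

        func main() {
        	// More storage nodes => higher task limit, per the comment above;
        	// the factor of 8 per node is an arbitrary illustrative choice.
        	nodes := 8
        	rec := systemRecommendations{S3ConcurrentTaskLimit: 8 * nodes}
        	out, err := xml.MarshalIndent(rec, "", "  ")
        	if err != nil {
        		panic(err)
        	}
        	fmt.Println(string(out))
        }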
  3. cmd/perf-tests.go

    	rand.Read(r.buf)
    
    	clusterInfos, err := globalSiteReplicationSys.GetClusterInfo(ctx)
    	if err != nil {
    		return madmin.SiteNetPerfNodeResult{Error: err.Error()}
    	}
    
    	// Scale the number of connections down from 32 to 4 as clusters grow from small to large.
    	connectionsPerPeer := 3 + (29+len(clusterInfos.Sites)-1)/len(clusterInfos.Sites)
    
    	errStr := ""
    	var wg sync.WaitGroup
    
    Go
    - Registered: Sun May 05 19:28:20 GMT 2024
    - Last Modified: Sun Jan 28 18:04:17 GMT 2024
    - 11.3K bytes
    - Viewed (0)
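    The formula computes 3 + ceil(29/len(sites)) in integer arithmetic: a single
    remote site gets 32 connections per peer, and the value tapers to 4 as the
    site count grows, keeping total fan-out bounded. A small sketch tabulating
    the values:

        package main

        import "fmt"

        func main() {
        	// Same expression as the excerpt, with len(clusterInfos.Sites)
        	// replaced by a plain integer for the table.
        	for _, sites := range []int{1, 2, 4, 8, 16, 29, 64} {
        		connectionsPerPeer := 3 + (29+sites-1)/sites
        		fmt.Printf("sites=%2d -> connectionsPerPeer=%d\n", sites, connectionsPerPeer)
        	}
        }

    For sites = 1 this yields 32; by sites = 29 and beyond it settles at 4,
    matching the "32 -> 4" range in the comment.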
  4. cmd/notification.go

    	// can easily eat into the internode bandwidth. This function is mostly
    	// TX-saturating, however there are situations where RX might also saturate.
    	// To avoid these problems we must split the work at scale. With 1000-node
    	// setups becoming a reality we must shard the work properly, e.g.
    	// pick 10 nodes that can precisely send those 100 requests the first node
    Go
    - Registered: Sun May 05 19:28:20 GMT 2024
    - Last Modified: Fri Apr 12 18:13:36 GMT 2024
    - 44.5K bytes
    - Viewed (0)
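    The excerpt argues for sharding broadcast work across relay nodes so that no
    single node saturates its TX (or RX) link. A minimal sketch of one way to do
    that partitioning; the shard helper is hypothetical and the
    1000-peers/10-relays numbers just follow the comment:

        package main

        import "fmt"

        // shard splits n items into k nearly equal contiguous [start, end) ranges.
        func shard(n, k int) [][2]int {
        	out := make([][2]int, 0, k)
        	for i := 0; i < k; i++ {
        		out = append(out, [2]int{i * n / k, (i + 1) * n / k})
        	}
        	return out
        }

        func main() {
        	// 1000 peers split across 10 relay nodes: each relay contacts
        	// ~100 peers instead of one node sending all 1000 requests.
        	for i, r := range shard(1000, 10) {
        		fmt.Printf("relay %d sends to peers [%d, %d)\n", i, r[0], r[1])
        	}
        }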