Results 1 - 10 of 117 for shards (0.06 sec)
docs/debugging/xl-meta/main.go
valid := 0
for shardIdx, shard := range splitFilled[:k] {
    shardConfig[shardIdx] = shard[offset]
    valid += int(shard[offset])
    if shard[offset] == 0 {
        shards[shardIdx] = shards[shardIdx][:0]
    } else {
        shards[shardIdx] = append(shards[shardIdx][:0], splitData[shardIdx][offset])
    }
}
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Thu Sep 05 11:57:44 UTC 2024 - 40.3K bytes - Viewed (0) -
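The xl-meta snippet above walks the first k (data) shards at a single byte offset, recording which shards actually hold data and collecting the byte from each filled shard. Below is a minimal, self-contained sketch of that per-offset gather; the names gatherColumn, filled, and split are illustrative and not part of the MinIO tool.

package main

import "fmt"

// gatherColumn collects, for one byte offset, the bytes of every data shard
// that is marked as filled. It returns the per-shard presence flags, the
// gathered shard slices (nil where a shard has no data at this offset), and
// the number of shards that were present.
func gatherColumn(filled, split [][]byte, offset, k int) (config []byte, shards [][]byte, valid int) {
    config = make([]byte, k)
    shards = make([][]byte, k)
    for i := 0; i < k; i++ {
        config[i] = filled[i][offset]
        valid += int(filled[i][offset])
        if filled[i][offset] == 0 {
            shards[i] = nil // shard missing at this offset
        } else {
            shards[i] = []byte{split[i][offset]}
        }
    }
    return config, shards, valid
}

func main() {
    filled := [][]byte{{1, 1}, {0, 1}} // shard 1 is missing at offset 0
    split := [][]byte{{'a', 'b'}, {'c', 'd'}}
    cfg, shards, valid := gatherColumn(filled, split, 0, 2)
    fmt.Println(cfg, shards, valid) // [1 0] [[97] []] 1
}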
guava-tests/test/com/google/common/hash/HashingTest.java
}
for (int shard = 2; shard <= MAX_SHARDS; shard++) {
    // Rough: don't exceed 1.2x the expected number of remaps by more than 20
    assertTrue(map.get(shard) <= 1.2 * ITERS / shard + 20);
}
}

private void countRemaps(long h, AtomicLongMap<Integer> map) {
    int last = 0;
    for (int shards = 2; shards <= MAX_SHARDS; shards++) {
Registered: Fri Nov 01 12:43:10 UTC 2024 - Last Modified: Tue Jul 09 17:40:09 UTC 2024 - 26.3K bytes - Viewed (0) -
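The Guava test above asserts that, as the shard count grows, roughly ITERS/shard keys change bucket each time a shard is added. Hashing.consistentHash belongs to the same family as Lamping and Veach's jump consistent hash; the sketch below counts remaps the same way using a plain jump-hash implementation, which is an illustration rather than Guava's code.

package main

import (
    "fmt"
    "math/rand"
)

// jumpHash maps a 64-bit key to one of numBuckets buckets. When numBuckets
// grows by one, only about 1/numBuckets of the keys move to a new bucket.
func jumpHash(key uint64, numBuckets int) int {
    var b, j int64 = -1, 0
    for j < int64(numBuckets) {
        b = j
        key = key*2862933555777941757 + 1
        j = int64(float64(b+1) * (float64(int64(1)<<31) / float64((key>>33)+1)))
    }
    return int(b)
}

func main() {
    const iters = 10000
    const maxShards = 8
    remaps := make(map[int]int) // shard count -> number of keys that moved

    for i := 0; i < iters; i++ {
        key := rand.Uint64()
        last := 0
        for shards := 2; shards <= maxShards; shards++ {
            if cur := jumpHash(key, shards); cur != last {
                remaps[shards]++
                last = cur
            }
        }
    }
    // Roughly iters/shards keys should move each time a shard is added.
    for shards := 2; shards <= maxShards; shards++ {
        fmt.Printf("shards=%d remaps=%d expected~%d\n", shards, remaps[shards], iters/shards)
    }
}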
src/main/java/org/codelibs/fess/suggest/request/popularwords/PopularWordsRequest.java
public void onResponse(final SearchResponse searchResponse) {
    if (searchResponse.getFailedShards() > 0) {
        deferred.reject(new SuggesterException("Search failure. Failed shards num:" + searchResponse.getFailedShards()));
    } else {
        deferred.resolve(createResponse(searchResponse));
    }
}

@Override
Registered: Fri Nov 08 09:08:12 UTC 2024 - Last Modified: Sat Oct 12 00:10:39 UTC 2024 - 7K bytes - Viewed (0) -
src/main/java/org/codelibs/fess/suggest/request/suggest/SuggestRequest.java
public void onResponse(final SearchResponse searchResponse) {
    if (searchResponse.getFailedShards() > 0) {
        deferred.reject(new SuggesterException("Search failure. Failed shards num:" + searchResponse.getFailedShards()));
    } else {
        deferred.resolve(createResponse(searchResponse));
    }
}

@Override
Registered: Fri Nov 08 09:08:12 UTC 2024 - Last Modified: Sat Oct 12 00:10:39 UTC 2024 - 13.3K bytes - Viewed (0) -
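Both Fess snippets above apply the same guard: if the search response reports any failed shards, the whole request is rejected rather than returning partial results. A hedged sketch of the guard is shown below; SearchResponse and checkShards are hypothetical stand-ins, not the Fess or Elasticsearch client API.

package main

import "fmt"

// SearchResponse is a hypothetical stand-in for a search client's response type.
type SearchResponse struct {
    FailedShards int
    Hits         []string
}

// checkShards mirrors the pattern above: fail the whole request if any shard
// failed, instead of silently serving partial results.
func checkShards(resp *SearchResponse) error {
    if resp.FailedShards > 0 {
        return fmt.Errorf("search failure, failed shards: %d", resp.FailedShards)
    }
    return nil
}

func main() {
    if err := checkShards(&SearchResponse{FailedShards: 2}); err != nil {
        fmt.Println("rejected:", err)
    }
}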
src/main/java/org/codelibs/fess/app/web/admin/maintenance/AdminMaintenanceAction.java
{ "aliases", "allocation", "count", "fielddata", "health", "indices", "master", "nodeattrs", "nodes", "pending_tasks", "plugins", "recovery", "repositories", "thread_pool", "shards", "segments", "snapshots", "templates" }; // =================================================================================== // Attribute
Registered: Thu Oct 31 13:40:30 UTC 2024 - Last Modified: Fri Oct 11 21:20:39 UTC 2024 - 14K bytes - Viewed (0) -
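The array above lists the _cat endpoints the maintenance action can dump, including shards. As a rough illustration, the sketch below fetches the standard /_cat/shards listing over HTTP; the cluster address localhost:9200 is an assumption.

package main

import (
    "fmt"
    "io"
    "net/http"
)

func main() {
    // http://localhost:9200 is an assumed local cluster address; /_cat/shards?v
    // returns a plain-text table of every shard, its state, and its node.
    resp, err := http.Get("http://localhost:9200/_cat/shards?v")
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        fmt.Println("read failed:", err)
        return
    }
    fmt.Print(string(body))
}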
cmd/erasure-decode.go
    // Reading first time on this disk, hence the buffer needs to be allocated.
    // Subsequent reads will reuse this buffer.
    p.buf[bufIdx] = make([]byte, p.shardSize)
}
// For the last shard, the shardsize might be less than previous shard sizes.
// Hence the following statement ensures that the buffer size is reset to the right size.
p.buf[bufIdx] = p.buf[bufIdx][:p.shardSize]
n, err := rr.ReadAt(p.buf[bufIdx], p.offset)
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Thu Aug 29 01:40:52 UTC 2024 - 9.5K bytes - Viewed (0) -
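The erasure-decode snippet above allocates one shard-sized buffer per disk on the first read and then only reslices it, so the final (possibly shorter) shard never reads stale bytes. The sketch below shows that allocate-once, reslice-per-read pattern in isolation; readShard and its signature are illustrative, not MinIO's API.

package main

import (
    "bytes"
    "fmt"
    "io"
)

// readShard reads shardSize bytes at offset from r, reusing the caller's buffer.
// The buffer is allocated only on first use and then resliced, so a shorter
// final shard simply shrinks the slice instead of forcing a new allocation.
func readShard(r io.ReaderAt, buf []byte, shardSize int, offset int64) ([]byte, int, error) {
    if cap(buf) < shardSize {
        // First read on this disk (or a larger shard than before): allocate.
        buf = make([]byte, shardSize)
    }
    // Reset length to exactly this shard's size; the last shard may be shorter.
    buf = buf[:shardSize]
    n, err := r.ReadAt(buf, offset)
    return buf, n, err
}

func main() {
    data := bytes.NewReader([]byte("0123456789abcdef"))
    var buf []byte

    buf, n, err := readShard(data, buf, 8, 0) // full shard
    fmt.Println(string(buf[:n]), err)
    buf, n, err = readShard(data, buf, 4, 8) // shorter final shard reuses the buffer
    fmt.Println(string(buf[:n]), err)
}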
istioctl/pkg/multixds/gather.go
retval := discovery.DiscoveryResponse{}
if len(responses) == 0 {
    return &retval, nil
}
for _, response := range responses {
    // Combine all the shards as one, even if that means losing information about
    // the control plane version from each shard.
    retval.ControlPlane = response.ControlPlane
    retval.Resources = append(retval.Resources, response.Resources...)
}
return &retval, nil
}
Registered: Wed Nov 06 22:53:10 UTC 2024 - Last Modified: Wed Oct 09 16:05:45 UTC 2024 - 13.6K bytes - Viewed (0) -
cmd/erasure-metadata.go
        // In this case, parity == 0 implies that this object version is a
        // delete marker
        readQuorum = N/2 + 1
    }
    if occ < readQuorum {
        // Ignore this parity since we don't have enough shards for read quorum
        continue
    }
    if occ > maxOcc {
        maxOcc = occ
        cparity = parity
    }
}
if maxOcc == 0 {
    // Did not found anything useful
    return -1
}
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Thu Oct 31 22:10:24 UTC 2024 - 21.3K bytes - Viewed (0) -
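The erasure-metadata snippet above picks the parity value that the most metadata copies agree on, but only when enough copies agree to satisfy read quorum, with delete markers (parity == 0) falling back to N/2 + 1. The sketch below reproduces that selection under the assumption that read quorum otherwise equals the number of data shards (N - parity); commonParity is a hypothetical helper, not MinIO's function.

package main

import "fmt"

// commonParity returns the parity value reported by the most metadata copies,
// provided enough copies agree to satisfy read quorum, and -1 otherwise.
// parities holds one reported parity per metadata copy; n is the erasure set size.
func commonParity(parities []int, n int) int {
    occMap := map[int]int{}
    for _, p := range parities {
        if p >= 0 {
            occMap[p]++
        }
    }

    maxOcc, cparity := 0, -1
    for parity, occ := range occMap {
        readQuorum := n - parity // assumed: quorum = number of data shards
        if parity == 0 {
            // parity == 0 marks a delete marker; fall back to simple majority.
            readQuorum = n/2 + 1
        }
        if occ < readQuorum {
            continue // not enough copies agree on this parity for read quorum
        }
        if occ > maxOcc {
            maxOcc, cparity = occ, parity
        }
    }
    if maxOcc == 0 {
        return -1 // nothing reached quorum
    }
    return cparity
}

func main() {
    // Six copies in a set of six: four report parity 2, two report parity 3.
    fmt.Println(commonParity([]int{2, 2, 2, 2, 3, 3}, 6)) // 2, since 4 >= 6-2
}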
cmd/notification.go
// To avoid these problems we must split the work at scale. With 1000 node
// setup becoming a reality we must try to shard the work properly such as
// pick 10 nodes that precisely can send those 100 requests the first node
// in the 10 node shard would coordinate between other 9 shards to get the
// rest of the `99*9` requests.
//
// This essentially splits the workload properly and also allows for network
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Mon Sep 09 16:58:30 UTC 2024 - 46.2K bytes - Viewed (0) -
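The comment above describes sharding broadcast work in a large cluster: nodes are grouped into shards of 10, and the first node of each shard coordinates the requests for the other 9. A minimal sketch of that grouping is shown below; shardNodes is illustrative only.

package main

import "fmt"

// shardNodes splits nodes into groups of at most size; the first node in each
// group acts as that shard's coordinator, as described in the comment above.
func shardNodes(nodes []string, size int) [][]string {
    var shards [][]string
    for start := 0; start < len(nodes); start += size {
        end := start + size
        if end > len(nodes) {
            end = len(nodes)
        }
        shards = append(shards, nodes[start:end])
    }
    return shards
}

func main() {
    nodes := make([]string, 25)
    for i := range nodes {
        nodes[i] = fmt.Sprintf("node-%02d", i)
    }
    for i, shard := range shardNodes(nodes, 10) {
        fmt.Printf("shard %d: coordinator=%s members=%d\n", i, shard[0], len(shard))
    }
}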
manifests/charts/UPDATING-CHARTS.md
# Table of Contents

- [Updating charts and values.yaml](#updating-charts-and-valuesyaml)
- [Acceptable Pull Requests](#acceptable-pull-requests)
- [Making changes](#making-changes)
- [Value deprecation](#value-deprecation)

<!-- markdown-toc end -->

# Updating charts and values.yaml

## Acceptable Pull Requests
Registered: Wed Nov 06 22:53:10 UTC 2024 - Last Modified: Thu Aug 15 16:31:46 UTC 2024 - 4.8K bytes - Viewed (0)