- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 1,139 for batches (0.34 sec)
-
src/internal/trace/batchcursor.go
lastTs Time idx int // next index into []batch dataOff int // next index into batch.data ev baseEvent // last read event } func (b *batchCursor) nextEvent(batches []batch, freq frequency) (ok bool, err error) { // Batches should generally always have at least one event, // but let's be defensive about that and accept empty batches. for b.idx < len(batches) && len(batches[b.idx].data) == b.dataOff { b.idx++
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 4.1K bytes - Viewed (0) -
staging/src/k8s.io/apiserver/plugin/pkg/audit/buffered/doc.go
See the License for the specific language governing permissions and limitations under the License. */ // Package buffered provides an implementation for the audit.Backend interface // that batches incoming audit events and sends batches to the delegate audit.Backend.
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu Feb 22 18:40:46 UTC 2018 - 809 bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/tfrt_fallback/batching_fallback.mlir
batching_queue = "batching_queue", enable_large_batch_splitting = false, Tin = [i32], Tcaptured = [i32], Tout = [i32]} : 1 // Since batch function kernel scheduling is async, the above 2 batches can arrive in any order. // CHECK: Running @matmul_cpu // CHECK-NEXT: Tensor<type: int32 shape: [4,2] values: [[value_output:.*]]> // CHECK: Tensor<type: int32 shape: [2,2] values: [4 4]
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jul 18 22:58:56 UTC 2023 - 8.6K bytes - Viewed (0) -
src/internal/trace/reader.go
r.cpuSamples = r.gen.cpuSamples // Reset frontier. for m, batches := range r.gen.batches { bc := &batchCursor{m: m} ok, err := bc.nextEvent(batches, r.gen.freq) if err != nil { return Event{}, err } if !ok { // Turns out there aren't actually any events in these batches. continue } r.frontier = heapInsert(r.frontier, bc) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 6.7K bytes - Viewed (0) -
src/internal/trace/internal/testgen/go122/trace.go
trace *Trace gen uint64 batches []*Batch strings map[string]uint64 stacks map[stack]uint64 // Options applied when Trace.Generate is called. ignoreStringBatchSizeLimit bool ignoreStackBatchSizeLimit bool } // Batch starts a new event batch in the trace data. // // This is a convenience function for generating correct batches. func (g *Generation) Batch(thread trace.ThreadID, time Time) *Batch {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 9.7K bytes - Viewed (0) -
staging/src/k8s.io/apiserver/plugin/pkg/audit/buffered/buffered.go
// WaitGroup to control the concurrency of sending batches to the delegate backend. // Worker routine calls Add before sending a batch and // then spawns a routine that calls Done after batch was processed by the delegate backend. // This WaitGroup is used to wait for all sending routines to finish before shutting down audit backend. wg sync.WaitGroup // Limits the number of batches sent to the delegate backend per second.
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Jul 26 17:14:05 UTC 2022 - 9.1K bytes - Viewed (0) -
platforms/software/dependency-management/src/main/java/org/gradle/api/internal/artifacts/ivyservice/resolveengine/graph/conflicts/FailOnVersionConflictGraphVisitor.java
import org.gradle.api.internal.artifacts.ivyservice.resolveengine.graph.DependencyGraphVisitor; import java.util.LinkedHashSet; import java.util.Set; /** * A visitor which batches up all conflicts and reports them all at once at the end of * the resolution. */ public class FailOnVersionConflictGraphVisitor implements DependencyGraphVisitor {
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu May 09 11:33:46 UTC 2024 - 2.9K bytes - Viewed (0) -
src/runtime/tracestring.go
// traceStringTable is map of string -> unique ID that also manages // writing strings out into the trace. type traceStringTable struct { // lock protects buf. lock mutex buf *traceBuf // string batches to write out to the trace. // tab is a mapping of string -> unique ID. tab traceMap } // put adds a string to the table, emits it, and returns a unique ID for it.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 15 17:03:35 UTC 2024 - 2.4K bytes - Viewed (0) -
src/crypto/cipher/cfb.go
if x.outUsed == len(x.out) { x.b.Encrypt(x.out, x.next) x.outUsed = 0 } if x.decrypt { // We can precompute a larger segment of the // keystream on decryption. This will allow // larger batches for xor, and we should be // able to match CTR/OFB performance. copy(x.next[x.outUsed:], src) } n := subtle.XORBytes(dst, src, x.out[x.outUsed:]) if !x.decrypt { copy(x.next[x.outUsed:], dst) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Oct 13 17:09:47 UTC 2023 - 2K bytes - Viewed (0) -
src/runtime/traceexp.go
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime // traceExpWriter is a wrapper around trace writer that produces traceEvExperimentalBatch // batches. This means that the data written to the writer need not conform to the standard // trace format. type traceExpWriter struct { traceWriter exp traceExperiment }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:47:01 UTC 2024 - 2.4K bytes - Viewed (0)