- Sort Score
- Results per page: 10
- Languages All
Results 171 - 180 of 3,149 for iterations (0.19 sec)
-
src/runtime/trace.go
// also no stale generation values left. Therefore, it's safe to flush // any buffers that remain in that generation's slot. const debugDeadlock = false systemstack(func() { // Track iterations for some rudimentary deadlock detection. i := 0 detectedDeadlock := false for mToFlush != nil { prev := &mToFlush for mp := *prev; mp != nil; { if mp.trace.seqlock.Load()%2 != 0 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 21:17:41 UTC 2024 - 37.1K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/prove.go
// no point in putting it into the facts table. } // try to rewrite to a downward counting loop checking against start if the // loop body does not depend on ind or nxt and end is known before the loop. // This reduces pressure on the register allocator because this does not need // to use end on each iteration anymore. We compare against the start constant instead.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:30:21 UTC 2024 - 48.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc
// operand type was inferred. This would need to be careful if working on a // region that would not be isolated. for (int iteration = 0; iteration < max_iterations && changed; ++iteration) { changed = false; LLVM_DEBUG(llvm::dbgs() << "Shape inference, iteration " << iteration << "\n"); auto res = region->walk([&](Operation* op) { auto abstract_op = op->getRegisteredInfo();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 08 07:28:49 UTC 2024 - 134.1K bytes - Viewed (0) -
doc/go_spec.html
a single byte in the string. </li> <li> The iteration order over maps is not specified and is not guaranteed to be the same from one iteration to the next. If a map entry that has not yet been reached is removed during iteration, the corresponding iteration value will not be produced. If a map entry is created during iteration, that entry may be produced during the iteration or
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 04 21:07:21 UTC 2024 - 281.5K bytes - Viewed (1) -
tensorflow/compiler/mlir/tensorflow/tests/convert_control_to_data_outputs.mlir
!tf_res = tensor<!tf_type.resource<tensor<f32>>> // Tests loop with resource that is unique per iteration. // // In cases where a resource-allocating op creates a new unique resource per // loop iteration (ops with `TF_UniqueResourceAllocation` trait, in this case: // `tf.StackV2`), make sure that we don't create data dependencies between // different iterations for such resources. This is in line with the behavior
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 18:35:00 UTC 2024 - 68.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_pipelining.cc
return vec; } void GatherOpsForExtraction(mlir::SetVector<Operation*>* operations, const mlir::SetVector<Operation*>& ops_to_avoid, bool predecessors, bool successors) { // Walk the input and output dependencies of the Ops in `operations` to form // the closure of Ops needed to evaluate 'operations'. Input dependencies are
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 92.9K bytes - Viewed (0) -
src/os/exec/exec_test.go
go runCommand(cb, bres) if got, want := <-ares, fmt.Sprintf("fd3: listener %s\n", la.Addr()); got != want { t.Errorf("iteration %d, process A got:\n%s\nwant:\n%s\n", i, got, want) } if got, want := <-bres, fmt.Sprintf("fd3: listener %s\n", lb.Addr()); got != want { t.Errorf("iteration %d, process B got:\n%s\nwant:\n%s\n", i, got, want) } la.Close() lb.Close() for _, f := range ca.ExtraFiles {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 20:13:53 UTC 2024 - 48.4K bytes - Viewed (0) -
src/cmd/compile/internal/liveness/plive.go
// Walk blocks in postorder ordering. This improves convergence. po := lv.f.Postorder() // Iterate through the blocks in reverse round-robin fashion. A work // queue might be slightly faster. As is, the number of iterations is // so low that it hardly seems to be worth the complexity. for change := true; change; { change = false for _, b := range po { be := lv.blockEffects(b) newliveout.Clear()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 15:22:22 UTC 2024 - 45.2K bytes - Viewed (0) -
src/cmd/go/alldocs.go
// // -fuzztime t // Run enough iterations of the fuzz target during fuzzing to take t, // specified as a time.Duration (for example, -fuzztime 1h30s). // The default is to run forever. // The special syntax Nx means to run the fuzz target N times // (for example, -fuzztime 1000x). // // -fuzzminimizetime t // Run enough iterations of the fuzz target during each minimization
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 11 16:54:28 UTC 2024 - 142.4K bytes - Viewed (0) -
guava/src/com/google/common/collect/MapMakerInternalMap.java
* read operations anyway: * * - All (unsynchronized) read operations must first read the "count" field, and should not * look at table entries if it is 0. * * - All (synchronized) write operations should write to the "count" field after structurally * changing any bin. The operations must not take any action that could even momentarily
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Sat May 18 03:24:34 UTC 2024 - 90.8K bytes - Viewed (0)