- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 28 for Predecessors (0.12 sec)
-
src/cmd/compile/internal/ssa/rewrite.go
var args []*Value for _, a := range target.Args { if a != load && a.Block.ID == target.Block.ID { args = append(args, a) } } // memPreds contains memory states known to be predecessors of load's // memory state. It is lazily initialized. var memPreds map[*Value]bool for i := 0; len(args) > 0; i++ { const limit = 100 if i >= limit { // Give up if we have done a lot of iterations.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 64.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_globals_to_ml_program.cc
&arg) .succeeded()) { options.insert(arg); } } } else { op->emitOpError("Predecessor op doesn't implement BranchOpInterface"); return failure(); } } } if (!options.empty()) { if (options.size() != 1) { op->emitOpError("Incompatible code paths.");
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.6K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/looprotate.go
// come right after it. after := map[ID][]*Block{} // Check each loop header and decide if we want to move it. for _, loop := range loopnest.loops { b := loop.header var p *Block // b's in-loop predecessor for _, e := range b.Preds { if e.b.Kind != BlockPlain { continue } if loopnest.b2l[e.b.ID] != loop { continue } p = e.b } if p == nil { continue }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 15:44:14 UTC 2024 - 3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_program_key.cc
// (parent) block, we need to check that it's before (the // (parent of) the preprocess_op. if (o->isBeforeInBlock(it->second)) { break; // valid compile predecessor } else { return WalkResult::advance(); } } o = o->getParentOp(); } // Check that the compile op actually passes its results to its parents.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.4K bytes - Viewed (0) -
platforms/core-configuration/model-core/src/main/java/org/gradle/model/internal/registry/DefaultModelRegistry.java
// Node must be at the predecessor state before calculating dependencies NodeAtState predecessor = new NodeAtState(getPath(), getTargetState().previous()); dependencies.add(graph.nodeAtState(predecessor)); // Transition any other nodes that depend on the predecessor state dependencies.add(new TransitionDependents(predecessor)); seenPredecessor = true;
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Fri May 24 15:40:00 UTC 2024 - 45.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/replicate_invariant_op_hoisting.cc
return WalkResult::interrupt(); return WalkResult::advance(); }); return result.wasInterrupted(); } // Make invariant the `ShapeOp`s or a `ReadVariableOp` that's the `ShapeOp`'s // predecessor. void MakeShapeOpInvariant(tf_device::ReplicateOp replicate_op, int num_replicas, Block* replicate_block, TF::ShapeOp shape_op) { // Ignore ShapeOps that have virtual devices.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.5K bytes - Viewed (0) -
src/internal/dag/parse.go
} for _, r := range rules { if r.op == "!<" { disallowed = append(disallowed, r) continue } for _, def := range r.def { if def == "NONE" { errorf("NONE cannot be a predecessor") continue } if !g.addNode(def) { errorf("multiple definitions for %s", def) } for _, less := range r.less { if less == "NONE" { continue }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 6.7K bytes - Viewed (0) -
src/internal/trace/gc.go
} } // An integrator tracks a position in a utilization function and // integrates it. type integrator struct { u *mmuSeries // pos is the index in u.util of the current time's non-strict // predecessor. pos int } // advance returns the integral of the utilization function from 0 to // time. advance must be called on monotonically increasing values of // times.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 26K bytes - Viewed (0) -
src/cmd/compile/internal/ssagen/ssa.go
} return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil } // updateUnsetPredPos propagates the earliest-value position information for b // towards all of b's predecessors that need a position, and recurs on that // predecessor if its position is updated. B should have a non-empty position. func (s *state) updateUnsetPredPos(b *ssa.Block) { if b.Pos == src.NoXPos { s.Fatalf("Block %s should have a position", b) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jun 10 19:44:43 UTC 2024 - 284.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td
let summary = "Colocates each Split op with its predecessor"; let constructor = "TFTPU::CreateTPUColocateSplitsPass()"; let description = [{ It is beneficial for performance to assign a `Split` op to the same device as its predecessor. This is because the weight of cut edges is always minimized when the `Split` is with its predecessor. This colocation
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:18:05 UTC 2024 - 99.6K bytes - Viewed (0)