- Sort Score
- Result 10 results
- Languages All
Results 81 - 90 of 131 for Predecessors (0.13 sec)
-
src/cmd/go/internal/modfetch/codehost/codehost.go
ReadZip(ctx context.Context, rev, subdir string, maxSize int64) (zip io.ReadCloser, err error) // RecentTag returns the most recent tag on rev or one of its predecessors // with the given prefix. allowed may be used to filter out unwanted versions. RecentTag(ctx context.Context, rev, prefix string, allowed func(tag string) bool) (tag string, err error)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sun Apr 14 18:50:24 UTC 2024 - 12.8K bytes - Viewed (0) -
src/internal/trace/traceviewer/http.go
causing a single colored bar to be horizontally continuous but vertically displaced. </p> <p> Clicking on a span reveals information about it, such as its duration, its causal predecessors and successors, and the stack trace at the final moment when it yielded the logical processor, for example because it made a system call or tried to acquire a mutex.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 21 21:29:53 UTC 2023 - 12.6K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/likelyadjust.go
certain := f.Cache.allocInt8Slice(f.NumBlocks()) // In the long run, all outcomes are at least this bad. Mainly for Exit defer f.Cache.freeInt8Slice(certain) local := f.Cache.allocInt8Slice(f.NumBlocks()) // for our immediate predecessors. defer f.Cache.freeInt8Slice(local) po := f.postorder() nest := f.loopnest() b2l := nest.b2l for _, b := range po { switch b.Kind { case BlockExit: // Very unlikely.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Oct 31 21:41:20 UTC 2022 - 15.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/extract_head_tail_outside_compilation.mlir
}) {num_cores_per_replica = 1, step_marker_location = "", topology = "", device_assignment = []} : () -> () func.return } // Test embedding ops can be head extracted and side effect analysis // predecessors are ignored. // CHECK-LABEL: func @embedding_head_extraction func.func @embedding_head_extraction(%arg0: tensor<!tf_type.string>) { // CHECK: "tf_device.launch"()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:59:10 UTC 2023 - 29.1K bytes - Viewed (0) -
tensorflow/compiler/jit/mark_for_compilation_pass.cc
best_pred_for_node[from] = -1; int rpo_index = 0, current_rpo_node; do { current_rpo_node = rpo[rpo_index++]; std::optional<int> some_pred, preferred_pred; for (int pred : cycles_graph_.Predecessors(current_rpo_node)) { if (!best_pred_for_node.contains(pred)) { continue; } // Ignore the from->to edge since we're trying to find an alternate path.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 85.3K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/regalloc.go
// // The register allocator requires that a block is not scheduled until // at least one of its predecessors have been scheduled. The most recent // such predecessor provides the starting register state for a block. // // It also requires that there are no critical edges (critical = // comes from a block with >1 successor and goes to a block with >1 // predecessor). This makes it easy to add fixup code on merge edges -
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 21 17:49:56 UTC 2023 - 87.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/rewrite.go
var args []*Value for _, a := range target.Args { if a != load && a.Block.ID == target.Block.ID { args = append(args, a) } } // memPreds contains memory states known to be predecessors of load's // memory state. It is lazily initialized. var memPreds map[*Value]bool for i := 0; len(args) > 0; i++ { const limit = 100 if i >= limit { // Give up if we have done a lot of iterations.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 64.2K bytes - Viewed (0) -
tensorflow/compiler/jit/extract_outside_compilation_pass.cc
send_node_in_host_graph = n; break; } } if (send_node_in_host_graph) { // This is a "top-level" outside compilation. Clear the graph, and copy // SendFromHost and all its predecessors from `host_graph`. std::vector<Node*> nodes; nodes.reserve(g->num_op_nodes()); for (Node* n : g->op_nodes()) { nodes.push_back(n); } for (Node* n : nodes) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 104.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/replicate_tensor_list_init_ops.mlir
%while:1 = "tf.WhileRegion"(%tl) ({ ^bb0(%barg1: tensor<!tf_type.variant<tensor<?x1xf32>>>): // no predecessors %cond = "tf.false"():()-> tensor<i1> "tf.Yield"(%cond) : (tensor<i1>) -> () }, { ^bb0(%barg1: tensor<!tf_type.variant<tensor<?x1xf32>>>): // no predecessors "tf.Yield"(%barg1) : (tensor<!tf_type.variant<tensor<?x1xf32>>>) -> ()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Jan 22 17:28:34 UTC 2023 - 8.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/order_by_dialect.cc
public: MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(OrderByDialectPass) void runOnOperation() override; }; int DialectOrdering(Operation* predecessor, Operation* op) { return predecessor && predecessor->getName().getDialectNamespace() == op->getName().getDialectNamespace(); } void OrderByDialectPass::runOnOperation() { ModuleOp module = getOperation();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Nov 08 17:01:11 UTC 2022 - 3.5K bytes - Viewed (0)