Results 1 - 4 of 4 for Predecessors (0.15 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/side-effect-analysis-test.mlir

          // expected-remark@above {{Predecessors: {6}}}
          // expected-remark@above {{Successors: {8}}}
          tf_executor.yield %read3 : tensor<32xf32>
          // expected-remark@above {{ID: 8}}
          // expected-remark@above {{Predecessors: {4,5,7}}}
        }
        tf_executor.fetch %island#0 : tensor<32xf32>
        // expected-remark@above {{ID: 10}}
        // expected-remark@above {{Predecessors: {9}}}
      }
    - Last Modified: Wed Dec 20 04:39:18 UTC 2023
    - 129.7K bytes
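    The expected-remark lines in this test capture the output of the TensorFlow MLIR side-effect analysis: each side-effecting op gets a numeric ID, and the remarks list the IDs of its control predecessors and successors. A minimal sketch of the same annotation pattern follows; the op names, types, and ID numbering are illustrative, not taken from the file above, and the exact set of remarks the analysis emits for a given function may differ.

        func.func @sketch(%var: tensor<*x!tf_type.resource<tensor<32xf32>>>, %val: tensor<32xf32>) -> tensor<32xf32> {
          // Write to the resource variable, then read it back; the read must be
          // ordered after the write on the same resource.
          "tf.AssignVariableOp"(%var, %val) : (tensor<*x!tf_type.resource<tensor<32xf32>>>, tensor<32xf32>) -> ()
          // expected-remark@above {{ID: 0}}
          %read = "tf.ReadVariableOp"(%var) : (tensor<*x!tf_type.resource<tensor<32xf32>>>) -> tensor<32xf32>
          // expected-remark@above {{ID: 1}}
          // expected-remark@above {{Predecessors: {0}}}
          func.return %read : tensor<32xf32>
        }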
  2. tensorflow/compiler/jit/extract_outside_compilation_pass.cc

          send_node_in_host_graph = n;
          break;
        }
      }
      if (send_node_in_host_graph) {
        // This is a "top-level" outside compilation. Clear the graph, and copy
        // SendFromHost and all its predecessors from `host_graph`.
        std::vector<Node*> nodes;
        nodes.reserve(g->num_op_nodes());
        for (Node* n : g->op_nodes()) {
          nodes.push_back(n);
        }
        for (Node* n : nodes) {
    - Last Modified: Tue Mar 12 06:33:33 UTC 2024
    - 104.7K bytes
  3. tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td

      let summary = "Colocates each Split op with its predecessor";
      let constructor = "TFTPU::CreateTPUColocateSplitsPass()";
      let description = [{
        It is beneficial for performance to assign a `Split` op to the same device
        as its predecessor. This is because the weight of cut edges is always
        minimized when the `Split` is with its predecessor. This colocation
    - Last Modified: Wed Jun 12 21:18:05 UTC 2024
    - 99.6K bytes
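    To make the description above concrete, the fragment below sketches the intended outcome: a Split whose input already has a device assignment ends up on the same device, so the full input tensor never has to cross a device boundary and only the smaller split outputs can become cut edges. The device string, op mix, and shapes are hypothetical and are not taken from the pass.

        func.func @colocation_sketch(%arg0: tensor<4x8xf32>) -> (tensor<2x8xf32>, tensor<2x8xf32>) {
          %axis = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
          // The predecessor already carries a device assignment.
          %pred = "tf.Identity"(%arg0) {device = "/device:TPU:0"} : (tensor<4x8xf32>) -> tensor<4x8xf32>
          // After colocation, the Split carries the same device as its predecessor.
          %split:2 = "tf.Split"(%axis, %pred) {device = "/device:TPU:0"} : (tensor<i32>, tensor<4x8xf32>) -> (tensor<2x8xf32>, tensor<2x8xf32>)
          func.return %split#0, %split#1 : tensor<2x8xf32>, tensor<2x8xf32>
        }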
  4. tensorflow/compiler/mlir/tensorflow/tests/shape_inference.mlir

          ^bb0(%barg0: tensor<i32>, %barg1: tensor<!tf_type.variant<tensor<?x1xf32>>>): // no predecessors
            %cond = "tf.Less"(%barg0, %size) : (tensor<i32>, tensor<i32>) -> tensor<i1>
            "tf.Yield"(%cond) : (tensor<i1>) -> ()
        }, {
          ^bb0(%barg0: tensor<i32>, %barg1: tensor<!tf_type.variant<tensor<?x1xf32>>>): // no predecessors
          %index = "tf.AddV2"(%barg0, %one) : (tensor<i32>, tensor<i32>) -> tensor<i32>
    - Last Modified: Tue Jan 23 17:24:10 UTC 2024
    - 167.4K bytes