Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 10 of 151 for subgraph4 (0.14 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/transforms/pick_subgraphs.cc

    //     subgraph3 (GPU)      subgraph4(GPU)
    //         |             /
    //      subgraph5 (GPU)
    //         |
    //      subgraph6 (CPU)
    //
    // The overall workflow of the pick subgraphs pass:
    //  1) Build subgraphs
    //    1.1) Collect output subgraphs.
    //    1.2) Build `Subgraph` and their "alternative view" from FuncOp.
    //  2) Pick subgraphs
    //    2.1) Populate the "dp table" for (subgraph, hardware).
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Nov 24 15:10:02 UTC 2022
    - 19.7K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/replace_stablehlo_ops_in_main_function_with_xla_call_module_ops.mlir

      // CHECK: return %[[SUBGRAPH_1]] : tensor<1024x3xf32>
      // CHECK: }
    }
    
    // -----
    
    // Tests where StableHLO graph in main has branch.
    // This test makes sure the branch will not be added to subgraph when it reaches
    // a tf op:
    // stablehlo.add and %0 are not in the same subgraph.
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 39.8K bytes
    - Viewed (0)
  3. tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc

      MarkGuaranteedConstants(*graph_in_, src_arg_pairs);
    
      for (auto& entry : subgraphs_) {
        Subgraph& subgraph = entry.second;
        FixupSourceAndSinkEdges(subgraph.GetGraph());
      }
    
      if (VLOG_IS_ON(1)) {
        // Dump subgraphs.
        for (auto& entry : subgraphs_) {
          DumpGraphToFile(
              absl::StrCat("encapsulate_subgraphs_subgraph_", entry.first),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 51K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc

      ASSERT_THAT(status, Eq(kTfLiteOk));
    
      const auto subgraph = model_.subgraphs[0].get();
    
      // The model should only have 3 inputs and 1 output.
      EXPECT_THAT(subgraph->inputs, SizeIs(3));
      EXPECT_THAT(subgraph->outputs, SizeIs(1));
    
      const auto& op1 = subgraph->operators[1].get();
      const auto& op2 = subgraph->operators[2].get();
      const auto& op3 = subgraph->operators[3].get();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 73.9K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/passes/replace_stablehlo_ops_in_main_function_with_xla_call_module_ops.cc

    }
    
    // Check if the op should be added to the subgraph.
    // The op should be added to the subgraph if all of its users match one
    // of following two conditions:
    // 1: The user is already in the current subgraph.
    // 2: The user will reach a dead end.
    //
    // If the op should be added to the subgraph and there are users who
    // will reach the dead end, add the ops on the dead end to the subgraph as well.
    bool ShouldAddOpToSubgraph(Operation* op,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 21K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

        for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size();
             ++subgraph_idx) {
          const auto subgraph = model->subgraphs()->Get(subgraph_idx);
          for (size_t i = 0; i < subgraph->inputs()->size(); ++i) {
            if (subgraph->inputs()->Get(i) == tensor_idx) {
              return true;
            }
          }
          for (size_t i = 0; i < subgraph->outputs()->size(); ++i) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/test_schema.fbs

      intermediates:[int];
    }
    
    // The root type, defining a subgraph, which typically represents an entire
    // model.
    table SubGraph {
      // A list of all tensors used in this subgraph.
      tensors:[Tensor];
    
      // Indices of the tensors that are inputs into this subgraph. Note this is
      // the list of non-static tensors that feed into the subgraph for inference.
      inputs:[int];
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 19 19:46:06 UTC 2021
    - 26.1K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/experimental/common/outline_operations.h

    // to ops in the subgraph, which are not self-contained within the subgraph.
    // The outputs of this function are taken to be the results of ops in the
    // subgraph which are referenced as operands outside of the subgraph.
    // Also refer to documentation of `AccumulateOperandsDefinedAbove` &
    // `AccumulateResultsDefinedWithin`.
    void ExtractSubgraphToFunc(const Subgraph& subgraph, OpBuilder& builder,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Nov 17 18:49:43 UTC 2022
    - 6K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/experimental/tac/transforms/raise_target_subgraphs.cc

            GetInferenceDeviceTypeForOp(partition_ops.front()).value();
        Subgraph old_subgraph(partition_ops, ++func_count);
        OpsAdded ops_added;
        ExtractSubgraphToFunc(old_subgraph, builder, module, ops_added);
        AddAttrs(ops_added, builder, func_count);
        // Ops in "CPU" subgraphs may have nested regions with other device subgraphs.
        // We recur into these nested blocks to raise those as well. We don't raise
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.4K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/experimental/tac/tests/pick-subgraphs.mlir

    // RUN: tac-opt-all-backends -tfl-pick-subgraphs %s -split-input-file -verify-diagnostics | FileCheck %s
    
    module {
      func.func @main(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>, %arg2: tensor<100xf32>, %arg3: tensor<100xf32>) -> tensor<2x100xf32> {
        %0 = func.call @func_0_GPU_FLOAT(%arg0, %arg1, %arg2) {tac.device = "GPU", tac.inference_type = "FLOAT", tac.interface_name = "func_0"} : (tensor<100xf32>, tensor<100xf32>, tensor<100xf32>) -> tensor<100xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 24.3K bytes
    - Viewed (0)
Back to top