Results 1 - 8 of 8 for unfold_batchmatmul (0.23 sec)

  1. tensorflow/compiler/mlir/lite/tests/end2end/unroll_batch_matmul_disabled.pbtxt

    # RUN: tf_tfl_translate -unfold_batchmatmul=false -tf-input-arrays=Placeholder,Placeholder_1 -tf-input-shapes=2,5,3:3,7 -tf-input-data-types=DT_FLOAT,DT_FLOAT -tf-output-arrays=MatMul -output-mlir %s -o - 2>&1 | FileCheck %s
    
    node {
      name: "Placeholder"
      op: "Placeholder"
      attr {
        key: "dtype"
        value {
          type: DT_FLOAT
        }
      }
      attr {
        key: "shape"
        value {
          shape {
            dim {
              size: 2
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.5K bytes
  2. tensorflow/compiler/mlir/lite/tests/end2end/unroll_batch_matmul.pbtxt

    # RUN: tf_tfl_translate -tf-input-arrays=Placeholder,Placeholder_1 -tf-input-shapes=2,5,3:3,7 -tf-input-data-types=DT_FLOAT,DT_FLOAT -tf-output-arrays=MatMul -unfold_batchmatmul=true -output-mlir %s -o - 2>&1 | FileCheck %s
    
    node {
      name: "Placeholder"
      op: "Placeholder"
      attr {
        key: "dtype"
        value {
          type: DT_FLOAT
        }
      }
      attr {
        key: "shape"
        value {
          shape {
            dim {
              size: 2
            }
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 2.6K bytes
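
The two tests above run the same BatchMatMul of a [2,5,3] lhs against a [3,7] rhs through tf_tfl_translate with -unfold_batchmatmul off and on. As a rough, standalone illustration of the shape bookkeeping behind "unfolding" (plain C++, not TensorFlow/TFLite code; the real lowering emits tfl.fully_connected ops rather than this loop), the lhs batch dimension is sliced into [5,3] matrices, each slice is multiplied by the shared [3,7] rhs, and the per-slice products are packed into the [2,5,7] result:

    // Illustrative only: the shape bookkeeping behind unfolding a
    // [2,5,3] x [3,7] BatchMatMul into per-slice matrix products.
    #include <array>
    #include <cstddef>
    #include <iostream>

    constexpr std::size_t B = 2, M = 5, K = 3, N = 7;

    using Slice = std::array<std::array<float, K>, M>;  // one [5,3] lhs slice
    using Rhs   = std::array<std::array<float, N>, K>;  // shared [3,7] rhs
    using Out   = std::array<std::array<float, N>, M>;  // one [5,7] output slice

    // The per-slice product each unfolded op computes.
    Out matmul(const Slice& a, const Rhs& b) {
      Out c{};
      for (std::size_t i = 0; i < M; ++i)
        for (std::size_t k = 0; k < K; ++k)
          for (std::size_t j = 0; j < N; ++j)
            c[i][j] += a[i][k] * b[k][j];
      return c;
    }

    int main() {
      std::array<Slice, B> lhs{};  // [2,5,3] input, zero-filled for brevity
      Rhs rhs{};                   // [3,7] input
      std::array<Out, B> out{};    // [2,5,7] result, packed from the slices
      for (std::size_t b = 0; b < B; ++b) out[b] = matmul(lhs[b], rhs);
      std::cout << "unfolded " << B << " slices; each yields a "
                << M << "x" << N << " block of the [2,5,7] result\n";
      return 0;
    }
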
  3. tensorflow/compiler/mlir/lite/python/graphdef_to_tfl_flatbuffer.cc

      mlir::TFL::PassConfig pass_config(quant_specs);
      bool emit_builtin_tflite_ops = !toco_flags.force_select_tf_ops();
      pass_config.emit_builtin_tflite_ops = emit_builtin_tflite_ops;
      pass_config.unfold_batch_matmul = toco_flags.unfold_batchmatmul();
      pass_config.lower_tensor_list_ops = toco_flags.lower_tensor_list_ops();
      pass_config.legalize_custom_tensor_list_ops =
          toco_flags.legalize_custom_tensor_list_ops();
    - Last Modified: Mon Mar 11 19:29:56 UTC 2024
    - 5.1K bytes
  4. tensorflow/compiler/mlir/lite/tf_tfl_translate_cl.cc

        "select-user-tf-ops",
        llvm::cl::desc(
            "<list of custom tf ops created by the user (comma separated)>"),
        llvm::cl::init(""));
    
    // NOLINTNEXTLINE
    opt<bool> unfold_batchmatmul(
        "unfold_batchmatmul",
        llvm::cl::desc(
            "Whether to unfold TF BatchMatMul to a set of TFL FullyConnected ops."),
        llvm::cl::init(false));
    
    // NOLINTNEXTLINE
    opt<bool> unfold_large_splat_constant(
    - Last Modified: Tue Mar 05 20:53:17 UTC 2024
    - 7.9K bytes
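
The flag above is an ordinary LLVM CommandLine option declared in tf_tfl_translate_cl.cc. A minimal standalone sketch of the same pattern (assumes an LLVM installation; the demo tool wrapped around the option is hypothetical and not part of TensorFlow):

    // Minimal llvm::cl usage mirroring the unfold_batchmatmul option above.
    // Requires LLVM headers/libraries; the surrounding tool is hypothetical.
    #include "llvm/Support/CommandLine.h"

    namespace {
    llvm::cl::opt<bool> unfold_batchmatmul(
        "unfold_batchmatmul",
        llvm::cl::desc(
            "Whether to unfold TF BatchMatMul to a set of TFL FullyConnected ops."),
        llvm::cl::init(false));
    }  // namespace

    int main(int argc, char** argv) {
      llvm::cl::ParseCommandLineOptions(argc, argv, "unfold_batchmatmul demo\n");
      // After parsing, the opt reads like a bool set by -unfold_batchmatmul=<true|false>.
      return unfold_batchmatmul ? 0 : 1;
    }
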
  5. tensorflow/compiler/mlir/lite/tf_tfl_translate_cl.h

    extern llvm::cl::opt<bool> convert_tf_while_to_tfl_while;
    extern llvm::cl::opt<std::string> select_user_tf_ops;
    extern llvm::cl::opt<bool> allow_all_select_tf_ops;
    extern llvm::cl::opt<bool> unfold_batchmatmul;
    extern llvm::cl::opt<bool> unfold_large_splat_constant;
    extern llvm::cl::opt<bool> guarantee_all_funcs_one_use;
    extern llvm::cl::opt<bool> enable_dynamic_update_slice;
    extern llvm::cl::opt<bool> preserve_assert_op;
    - Last Modified: Tue Mar 05 20:53:17 UTC 2024
    - 3.3K bytes
  6. tensorflow/compiler/mlir/lite/python/jax_to_tfl_flatbuffer.cc

      pass_config.unfold_batch_matmul = toco_flags.unfold_batchmatmul();
      pass_config.lower_tensor_list_ops = toco_flags.lower_tensor_list_ops();
      // Disable the unfolding of the 16x16 TF::BatchMatMulOp to avoid the
      // conversion to an unsupported 16x16 TFL::FullyConnectedOp.
      if (toco_flags.inference_type() == toco::IODataType::QUANTIZED_INT16) {
        pass_config.unfold_batch_matmul = false;
      }
    - Last Modified: Mon Mar 11 19:29:56 UTC 2024
    - 8K bytes
  7. tensorflow/compiler/mlir/lite/common/tfl_pass_config.h

      // ops of the same device, under a `tf_device.launch` op.
      bool form_clusters = false;
      // If `unfold_batch_matmul` is true, the tf.BatchMatMul is unfolded to a set
      // of tfl.fully_connected ops.
      bool unfold_batch_matmul = true;
      // Whether to outline WhileOp at the end of the pipeline.
      bool outline_tf_while = false;
      // Whether to do shape inference.
    - Last Modified: Wed May 08 19:05:30 UTC 2024
    - 6.5K bytes
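
Results 3, 6, and 7 together show where the flag ends up: the converter entry points copy toco_flags.unfold_batchmatmul() into mlir::TFL::PassConfig::unfold_batch_matmul (which defaults to true), and the JAX path additionally forces it off for 16x16 quantization. A condensed sketch of that wiring follows; the BuildPassConfig helper, its generic parameters, and the include path are illustrative assumptions, and the TocoFlags/QuantizationSpecs headers are omitted:

    // Sketch of how the converters above populate PassConfig; mirrors
    // graphdef_to_tfl_flatbuffer.cc and jax_to_tfl_flatbuffer.cc. The helper
    // and its generic parameters are illustrative, not TensorFlow API.
    #include "tensorflow/compiler/mlir/lite/common/tfl_pass_config.h"

    template <typename TocoFlags, typename QuantSpecs>
    mlir::TFL::PassConfig BuildPassConfig(const TocoFlags& toco_flags,
                                          const QuantSpecs& quant_specs) {
      mlir::TFL::PassConfig pass_config(quant_specs);
      pass_config.unfold_batch_matmul = toco_flags.unfold_batchmatmul();
      // From jax_to_tfl_flatbuffer.cc: keep BatchMatMul folded under 16x16
      // quantization to avoid an unsupported 16x16 TFL::FullyConnectedOp.
      if (toco_flags.inference_type() == toco::IODataType::QUANTIZED_INT16) {
        pass_config.unfold_batch_matmul = false;
      }
      return pass_config;
    }
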
  8. tensorflow/compiler/mlir/lite/experimental/tac/tac_module.cc

        // function.
        // And in fact, we probably need to do the proper legalization, for the
        // compute cost to work. (in case we added some TF ops)
        pass_manager->addPass(mlir::TFL::CreatePrepareTFPass(
            /*unfold_batch_matmul=*/true,
            /*allow_bf16_and_f16_type_legalization=*/false));
        pass_manager->addNestedPass<mlir::func::FuncOp>(
            mlir::createCanonicalizerPass());
        pass_manager->addPass(
    - Last Modified: Thu Jun 08 01:19:25 UTC 2023
    - 5.6K bytes
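
Result 8 shows the TAC module registering the preparation step directly, with unfolding hard-coded on. A small sketch of that registration in isolation; the pass calls are taken verbatim from the snippet above, while the helper function and the include paths are assumptions:

    // Sketch: adding the prepare-TF step with unfold_batch_matmul enabled,
    // mirroring tac_module.cc above. Include paths are assumptions.
    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/Pass/PassManager.h"
    #include "mlir/Transforms/Passes.h"
    #include "tensorflow/compiler/mlir/lite/transforms/passes.h"

    void AddPrepareTF(mlir::PassManager& pass_manager) {
      pass_manager.addPass(mlir::TFL::CreatePrepareTFPass(
          /*unfold_batch_matmul=*/true,
          /*allow_bf16_and_f16_type_legalization=*/false));
      pass_manager.addNestedPass<mlir::func::FuncOp>(
          mlir::createCanonicalizerPass());
    }
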