- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 8 of 8 for unfold_batchmatmul (0.39 sec)
-
tensorflow/compiler/mlir/lite/tests/end2end/unroll_batch_matmul_disabled.pbtxt
# RUN: tf_tfl_translate -unfold_batchmatmul=false -tf-input-arrays=Placeholder,Placeholder_1 -tf-input-shapes=2,5,3:3,7 -tf-input-data-types=DT_FLOAT,DT_FLOAT -tf-output-arrays=MatMul -output-mlir %s -o - 2>&1 | FileCheck %s node { name: "Placeholder" op: "Placeholder" attr { key: "dtype" value { type: DT_FLOAT } } attr { key: "shape" value { shape { dim { size: 2
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 1.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/end2end/unroll_batch_matmul.pbtxt
# RUN: tf_tfl_translate -tf-input-arrays=Placeholder,Placeholder_1 -tf-input-shapes=2,5,3:3,7 -tf-input-data-types=DT_FLOAT,DT_FLOAT -tf-output-arrays=MatMul -unfold_batchmatmul=true -output-mlir %s -o - 2>&1 | FileCheck %s node { name: "Placeholder" op: "Placeholder" attr { key: "dtype" value { type: DT_FLOAT } } attr { key: "shape" value { shape { dim { size: 2 }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 2.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/saved_model_to_tfl_flatbuffer.cc
pass_config.unfold_batch_matmul = toco_flags.unfold_batchmatmul(); pass_config.lower_tensor_list_ops = toco_flags.lower_tensor_list_ops(); // Disable the unfolding of the 16x16 TF::BatchMatMulOp to avoid the // conversion to an unsupported 16x16 TFL::FullyConnectedOp. if (toco_flags.inference_type() == toco::IODataType::QUANTIZED_INT16) { pass_config.unfold_batch_matmul = false; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun May 12 12:39:37 UTC 2024 - 11K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_tfl_translate.cc
} mlir::TFL::PassConfig pass_config(quant_specs); pass_config.emit_builtin_tflite_ops = emit_builtin_tflite_ops; pass_config.lower_tensor_list_ops = lower_tensor_list_ops; pass_config.unfold_batch_matmul = unfold_batchmatmul; pass_config.unfold_large_splat_constant = unfold_large_splat_constant; pass_config.guarantee_all_funcs_one_use = guarantee_all_funcs_one_use;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 14K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/passes.td
"mlir::quant::QuantizationDialect", "mlir::quantfork::QuantizationForkDialect", "mhlo::MhloDialect" ]; let options = [ Option<"unfold_batch_matmul_", "unfold_batchmatmul", "bool", "true", "Unfold BatchMatMul into individual MatMul ops.">, Option<"allow_bf16_and_f16_type_legalization_", "allow-bf16-and-f16-type-legalization",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 22.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/common/tfl_pass_config.h
// ops of the same device, under a `tf_device.launch` op. bool form_clusters = false; // If `unfold_batch_matmul` is true, the tf.BatchMatMul is unfolded to a set // of tfl.fully_connected ops. bool unfold_batch_matmul = true; // Whether to outline WhileOp at the end of the pipeline. bool outline_tf_while = false; // Whether to do shape inference.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:05:30 UTC 2024 - 6.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
mlir::TFL::CreateOptimizeOpOrderPass()); // Add optimization pass after quantization for additional fusing // opportunities. if (!pass_config.unfold_batch_matmul) { // Enable an optimization pass that transforms FC to BatchMatmul only when // `unfold_batch_matmul=false`. pass_manager.addNestedPass<mlir::func::FuncOp>( mlir::TFL::CreateOptimizeBatchMatmulPass()); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 25.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
PrepareTFPass() = default; PrepareTFPass(const PrepareTFPass &) {} explicit PrepareTFPass(bool unfold_batch_matmul, bool allow_bf16_and_f16_type_legalization, bool use_fake_quant_num_bits = false) { this->unfold_batch_matmul_ = unfold_batch_matmul; this->allow_bf16_and_f16_type_legalization_ = allow_bf16_and_f16_type_legalization;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 64.6K bytes - Viewed (0)