- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 22 for qdq (0.04 sec)
-
tensorflow/compiler/mlir/lite/tests/post-quantize.mlir
// QDQ-NEXT: %[[out1:.*]] = "tfl.dequantize"(%[[split]]#0) : (tensor<2x!quant.uniform<u8:f32, 1.000000e+00>>) -> tensor<2xf32> // QDQ-NEXT: %[[out2:.*]] = "tfl.dequantize"(%[[split]]#1) : (tensor<2x!quant.uniform<u8:f32, 1.000000e+00>>) -> tensor<2xf32> // QDQ-NEXT: return %[[out1]], %[[out2]] : tensor<2xf32>, tensor<2xf32> } // CHECK-LABEL: RemoveTrival // QDQ-LABEL: RemoveTrival
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 19.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_fake_quant_to_qdq.cc
StringRef getArgument() const final { // This is the argument used to refer to the pass in // the textual format (on the commandline for example). return "quant-convert-fake-quant-to-qdq"; } StringRef getDescription() const final { // This is a brief description of the pass. return "Convert Fake Quant op to quant.qcast and quant.dcast pairs"; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 15 00:56:15 UTC 2023 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir
%1 = "tfl.logistic"(%0) : (tensor<1x6x6x16xf32>) -> tensor<1x6x6x16xf32> func.return %1 : tensor<1x6x6x16xf32> // QDQ: %0 = "tfl.dequantize"(%arg0) // QDQ: %1 = "tfl.logistic"(%0) : (tensor<1x6x6x16xf32>) -> tensor<1x6x6x16xf32> // QDQ-NOT:"tfl.quantize" // QDQ: return %1 : tensor<1x6x6x16xf32> } // QDQ-LABEL: QDQNoQuantizeSoftmax
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 67.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/convert_fake_quant_to_qdq.mlir
// RUN: tf-quant-opt %s -quant-convert-fake-quant-to-qdq | FileCheck %s func.func @fakeQuantArgs(%arg0: tensor<8x8x8x8xf32>) -> tensor<8x8x8x8xf32> { %0 = "tf.FakeQuantWithMinMaxArgs"(%arg0) { min = -0.1 : f32, max = 0.2 : f32, num_bits = 8 } : (tensor<8x8x8x8xf32>) -> tensor<8x8x8x8xf32> func.return %0 : tensor<8x8x8x8xf32> } // CHECK: func @fakeQuantArgs
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 24 07:02:54 UTC 2022 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/passes.td
"std::string", "", "Specifies which custom ops are NoSideEffect.">, ]; } def PostQuantizeRemoveQDQPass : Pass<"tfl-post-quantize-remove-qdq", "mlir::func::FuncOp"> { let summary = "Remove qdq from input and output nodes after quantization."; let constructor = "CreatePostQuantizeRemoveQDQPass()"; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 22.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_flow.mlir
// RUN: tf-quant-opt %s -quant-convert-fake-quant-to-qdq -quant-lift-quantizable-spots-as-functions -quant-insert-quantized-functions -quant-quantize-composite-functions -symbol-dce | FileCheck %s func.func @fake_quant_conv(%arg0: tensor<1x3x4x3xf32>, %arg1: tensor<2x3x3x2xf32>) -> tensor<*xf32> { %cst = "tf.Const"() {value = dense<0.000000e+00> : tensor<2xf32>} : () -> tensor<2xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 3.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
ret.setOperand(i, quantized.front()); } i++; } }); // Check for (Quant (Dequant $in), $qA) "qdq" pairs that couldn't be // eliminated at this point. This only occurs for the pattern // (Quant (Dequant (Quant $in, $qB)), $qA) $qB != $qA // where the qdq pair denotes a non-trivial requantization of an // already quantized value. Since this makes little sense (directly quantizing
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/saved_model_to_tfl_flatbuffer.cc
} else if (toco_flags.qdq_conversion_mode() == "NONE") { pass_config.quant_specs.qdq_conversion_mode = mlir::quant::QDQConversionMode::kQDQNone; } else { return errors::InvalidArgument("Unknown QDQ conversion mode: ", toco_flags.qdq_conversion_mode()); } if (toco_flags.has_qdq_conversion_mode() && toco_flags.qdq_conversion_mode() != "NONE") {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun May 12 12:39:37 UTC 2024 - 11K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
"quantization error!"); }); // Check for (Quant (Dequant $in), $qA) "qdq" pairs that couldn't be // eliminated at this point. This only occurs for the pattern // (Quant (Dequant (Quant $in, $qB)), $qA) $qB != $qA // where the qdq pair denotes a non-trivial requantization of an // already quantized value. Since this makes little sense (directly quantizing
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc
} tensorflow::AddQuantizationPasses(mlir::TFL::PassConfig(quant_specs), pm); pm.addPass(TFL::CreateModifyIONodesPass(input_mlir_type, output_mlir_type)); // If the first or final ops are not quantized, remove QDQ. pm.addPass(TFL::CreatePostQuantizeRemoveQDQPass()); if (failed(pm.run(module.get()))) { const std::string err(statusHandler.ConsumeStatus().message()); LOG(ERROR) << "Failed to quantize: " << err;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 6.3K bytes - Viewed (0)