- Sort Score
- Result 10 results
- Languages All
Results 31 - 40 of 203 for dequantize (0.16 sec)
-
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h
// Inserts the Quantize ops for requantizing a block argument. void RequantizeArg(BlockArgument arg, RequantizeStates& states); // Inserts the Quantize and Dequantize ops to quantize the value and returns // the Quantize op. void RequantizeValue(Value value, RequantizeStates& states, Location loc); // Returns the quantization parameter that satisfies the same scale
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 20 11:42:17 UTC 2024 - 16.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc
auto op_before_dequantize = original_dequantize_op.getOperand(0); // Create a new dequantize op that is propagated. rewriter.setInsertionPointAfter(user_op); TF::PartitionedCallOp new_dequantize_op = cast<TF::PartitionedCallOp>(rewriter.clone(*original_dequantize_op)); // Skip the original dequant op and connect the op before dequantize to the // user op. user_op->setOperand(user_idx, op_before_dequantize);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.h
#include "mlir/IR/BuiltinOps.h" // from @llvm-project namespace mlir { namespace TFL { // Converts all the tfl.quantize/tfl.dequantize ops to the ops in the mlir.quant // dialect ones in the function. void ConvertTFLQuantOpsToMlirQuantOps(func::FuncOp func); // Converts all the mlir.quant dialect ops to the tfl.quantize/tfl.dequantize // ops in the function. void ConvertMlirQuantOpsToTFLQuantOps(func::FuncOp func);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 19 00:13:50 UTC 2022 - 2.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
op->user_begin()->hasTrait<OpTrait::IsTerminator>()) return failure(); } // If the quantize op is a requantize op, it is being used in other scale // adjustments and should be kept. Instead, move the dequantize op before // the requantize op to remove the unnecessary requantize op. if (auto qtype = quant::QuantizedType::getQuantizedElementType( q.getInput().getType())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/post_quantize.cc
op->user_begin()->hasTrait<OpTrait::IsTerminator>()) return failure(); } // If the quantize op is a requantize op, it is being used in other scale // adjustments and should be kept. Instead, move the dequantize op before // the requantize op to remove the unnecessary requantize op. if (auto qtype = QuantizedType::getQuantizedElementType(q.getArg().getType())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 5.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d_v2.mlir
// CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { // CHECK-NEXT: deprecated_builtin_code: 6, // CHECK-NEXT: version: 1, // CHECK-NEXT: builtin_code: DEQUANTIZE // CHECK-NEXT: }, { // CHECK-NEXT: deprecated_builtin_code: 4, // CHECK-NEXT: version: 2, // CHECK-NEXT: builtin_code: DEPTHWISE_CONV_2D // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jul 14 16:41:28 UTC 2022 - 9.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc
const auto float_graph = model_->subgraphs()->Get(subgraph_idx); // The output graph should have an extra tensor from the added dequantize // op. ASSERT_EQ(quantized_graph->tensors()->size(), float_graph->tensors()->size() + 1); // Check that a dequantize op exists. int32_t dequant_input_idx = -1; int32_t dequant_output_idx = -1;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 32.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h
// Finally, use the quantization parameter to create the quantize and // dequantize ops, and insert them between the tf.FakeQuantWithMinMaxVarsOp // and its users. auto quantize = rewriter.create<quantfork::QuantizeCastOp>( tf_op.getLoc(), qtype.getValue(), input); auto dequantize = rewriter.create<quantfork::DequantizeCastOp>( tf_op.getLoc(), res_type, quantize.getResult());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir
// CHECK: %[[dequantize:.*]] = "tf.PartitionedCall"(%[[maxpool]] // CHECK-SAME: f = @dequantize_i8 // CHECK: return %[[dequantize]] // CHECK: -------- Quantization Summary -------- // CHECK: Number of quantized layers in the model // CHECK: --------------------------------
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 25.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir
// CHECK-DAG: %[[VAL_8:.*]] = "tfl.pseudo_const"(){{.*}}dense<[384, 128]> : tensor<2xi32> // CHECK: %[[VAL_9:.*]] = "tfl.dequantize"(%[[VAL_0]]) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<384x512x!quant.uniform<i8:f32, 1.000000e-01>>) -> tensor<384x512xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 20.1K bytes - Viewed (0)