- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 155 for requantize (0.39 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/post_quantize.cc
op->user_begin()->hasTrait<OpTrait::IsTerminator>()) return failure(); } // If the quantize op is a requantize op, it is being used in other scale // adjustments and should be kept. Instead, move the dequantize op before // the requantize op to remove the unnecessary requantize op. if (auto qtype = QuantizedType::getQuantizedElementType(q.getArg().getType())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 5.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/post_quantize.cc
if (!q->getAttr(kVolatileOpAttrName)) return failure(); // If the quantize op is a requantize op, it is being used in other scale // adjustments and should be kept. Instead, move dequantize op before the // requantize op to remove the unnecessary requantize op. if (const QuantizedType qtype = QuantizedType::getQuantizedElementType(q.getArg().getType())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/quantization_context.cc
auto &requantize = states_manager_.GetOperandRequantizeState(op, i); if (state.IsEmpty() && requantize.pos == RequantizeState::NO_REQUANTIZE) { input_specs.push_back(original_input_specs[i]); } else if (requantize.pos == RequantizeState::ON_OUTPUT) { input_specs.push_back(TypeAttr::get(requantize.params)); } else {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 08 01:38:03 UTC 2024 - 13.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h
// Inserts the Quantize ops for requantizing a block argument. void RequantizeArg(BlockArgument arg, RequantizeStates& states); // Inserts the Quantize and Dequantize ops to quantize the value and returns // the Quantize op. void RequantizeValue(Value value, RequantizeStates& states, Location loc); // Returns the quantization parameter satisfies the same scale
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 20 11:42:17 UTC 2024 - 16.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/quantization_context.h
struct RequantizeState { // Sometimes, we have to "requantize" the quantization result to satisfy all // the constraints. The "requantize" can happen either on the input or output // of the quantization result. enum RequantizePosition { NO_REQUANTIZE, ON_INPUT, ON_OUTPUT } pos = NO_REQUANTIZE; // Quantization parameters will be used to add the requantize ops. QuantParams params; };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 08 01:38:03 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/quantize_patterns.td
include "tensorflow/compiler/mlir/lite/ir/tfl_ops.td" // Quantize attribute $0 by using quantization parameter from %1. def QuantizeByQuantizedType : NativeCodeCall<"quant::Quantize($0, $1.getValue())">; def F32ElementsAttr : ElementsAttrBase< CPred<"$_self.cast<ElementsAttr>().getShapedType().getElementType().isF32()">, "float constant tensor">; // Squash tfl.dequantize and tfl.quantize pairs.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 23:10:13 UTC 2024 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
op->user_begin()->hasTrait<OpTrait::IsTerminator>()) return failure(); } // If the quantize op is a requantize op, it is being used in other scale // adjustments and should be kept. Instead, move the dequantize op before // the requantize op to remove the unnecessary requantize op. if (auto qtype = quant::QuantizedType::getQuantizedElementType( q.getInput().getType())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir
batch_dims = 0 : i64, attr_map = "batch_dims:0"} : (tensor<*xi8>, tensor<*xi32>, tensor<i32>) -> tensor<*xi8> // Requantize as the output quantization params can be different than the input for Gather ops. // Ex: Input can be per-axis quantized while output can be per-tensor. %requantize = "tf.PartitionedCall"(%out, %weight_scale, %weight_zp, %out_scale, %out_zp) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 30.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-quant.mlir
// CHECK: %[[REQUANTIZE:.*]] = mhlo.uniform_quantize %[[CONVERT_2]] : (tensor<4x!quant.uniform<i8:f32, 1.000000e+00:3>>) -> tensor<4x!quant.uniform<i8:f32, 2.000000e+00:5>> // CHECK: %[[CONVERT_3:.*]] = mhlo.bitcast_convert %[[REQUANTIZE]] : (tensor<4x!quant.uniform<i8:f32, 2.000000e+00:5>>) -> tensor<4xi8>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 01:25:29 UTC 2024 - 37.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/merge_fusion_with_dequantize.cc
auto func_op = dyn_cast_or_null<func::FuncOp>(symbol_table.lookup(func_name)); if (!func_op) return failure(); // The quantized fusion should have requantize and return ops at the end. auto return_op = dyn_cast_or_null<func::ReturnOp>( func_op.getRegion().getBlocks().front().getTerminator()); if (!return_op) return failure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.9K bytes - Viewed (0)