Results 21 - 30 of 178 for dequantize
tensorflow/compiler/mlir/lite/transforms/optimize_op_order.cc
// Dequantize ops will produce 3x larger tensors, so we want to move it after
// some passthrough ops to reduce the memory consumption.
struct PushDownDequantize : public OpRewritePattern<DequantizeOp> {
  explicit PushDownDequantize(MLIRContext* context)
      : OpRewritePattern<DequantizeOp>(context) {}
  LogicalResult matchAndRewrite(DequantizeOp dequantize_op,
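Why pushing the op down helps: dequantizing i8 to f32 grows every element from one byte to four, while passthrough ops such as reshape or transpose are indifferent to element type. A minimal standalone sketch of the reordering idea (hypothetical helpers, not the MLIR rewrite itself):

#include <cstdint>
#include <vector>

// Hypothetical helpers: dequantize plus a passthrough op (here a reverse)
// that does not care about the element type.
std::vector<float> Dequantize(const std::vector<int8_t>& q, float scale,
                              int zero_point) {
  std::vector<float> out;
  out.reserve(q.size());
  for (int8_t v : q) out.push_back(scale * (v - zero_point));
  return out;
}

template <typename T>
std::vector<T> Reverse(const std::vector<T>& v) {
  return {v.rbegin(), v.rend()};
}

int main() {
  std::vector<int8_t> q = {-4, 0, 7};
  // Before the rewrite: dequantize first, the passthrough op runs on 4-byte floats.
  auto a = Reverse(Dequantize(q, 0.5f, 1));
  // After the rewrite: the passthrough op runs on 1-byte ints, dequantize last.
  auto b = Dequantize(Reverse(q), 0.5f, 1);
  return a == b ? 0 : 1;  // identical results, smaller intermediate tensor
}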
tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc
  }
  TypeAttr type_attr = TypeAttr::get(new_type);
  auto quantize = builder.create<TFL::QuantizeOp>(value.getLoc(), new_type,
                                                  value, type_attr);
  auto dequantize = builder.create<TFL::DequantizeOp>(
      value.getLoc(), expressed_type, quantize.getOutput());
  value.replaceAllUsesWith(dequantize);
  // `quantize` is using `dequantize` now, so we should set its operand to
  // `value`.
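The effect of the inserted pair is that downstream users still see float data, but with the quantization rounding error applied. A small sketch of that numeric round trip, with an arbitrarily chosen scale and zero point (not taken from the pass):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Affine quantize: float -> int8, then dequantize back to float.
int8_t Quantize(float x, float scale, int zero_point) {
  int q = static_cast<int>(std::lround(x / scale)) + zero_point;
  return static_cast<int8_t>(std::clamp(q, -128, 127));
}

float Dequantize(int8_t q, float scale, int zero_point) {
  return scale * (q - zero_point);
}

int main() {
  float value = 0.337f, scale = 0.1f;
  int zp = 0;
  // The quantize/dequantize pair replaces `value` for all its users,
  // exposing the rounding error to the rest of the graph.
  float qdq = Dequantize(Quantize(value, scale, zp), scale, zp);
  std::printf("%f -> %f\n", value, qdq);  // 0.337 -> 0.3
}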
tensorflow/compiler/mlir/lite/tests/decompose-hybrid-quantization.mlir
// CHECK-DAG: %[[VAL2:.+]] = "tfl.dequantize"(%[[VAL0]])
// CHECK-DAG: %[[VAL3:.+]] = "tfl.dequantize"(%[[VAL1]])
// CHECK-DAG: %[[VAL4:.+]] = "tfl.conv_2d"(%arg0, %[[VAL2]], %[[VAL3]]) <{dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32}>
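The test expects a hybrid op (float activations, quantized weights) to decompose into explicit dequantize ops feeding an ordinary float op. A sketch of the equivalent computation on a single dot product, with made-up weights and scale:

#include <cstddef>
#include <cstdint>
#include <vector>

// Float dot product: a stand-in for the float tfl.conv_2d body.
float Dot(const std::vector<float>& a, const std::vector<float>& b) {
  float acc = 0.0f;
  for (std::size_t i = 0; i < a.size(); ++i) acc += a[i] * b[i];
  return acc;
}

int main() {
  std::vector<float> input = {1.0f, 2.0f, 3.0f};
  std::vector<int8_t> quantized_weights = {10, -5, 2};
  float weight_scale = 0.05f;  // assumed per-tensor scale, zero point 0
  // Decomposed form: dequantize the weights explicitly, then run the float op.
  std::vector<float> weights;
  for (int8_t w : quantized_weights) weights.push_back(weight_scale * w);
  return Dot(input, weights) > 0.0f ? 0 : 1;
}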
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/quantization.mlir
tensorflow/compiler/mlir/lite/experimental/tac/tests/pick-subgraphs.mlir
%0 = "tfl.dequantize"(%arg0) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<100x!quant.uniform<i8:f32, 2.000000e-01:-3>>) -> tensor<100xf32> %1 = "tfl.dequantize"(%arg1) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<100x!quant.uniform<i8:f32, 2.000000e-01:-3>>) -> tensor<100xf32>
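In `!quant.uniform<i8:f32, 2.000000e-01:-3>` the storage type is i8, the expressed type f32, the scale 0.2, and the zero point -3, so dequantize maps each stored q to 0.2 * (q - (-3)). A quick check of that arithmetic:

#include <cstdint>
#include <cstdio>

int main() {
  // From !quant.uniform<i8:f32, 2.000000e-01:-3>: scale 0.2, zero point -3.
  const float scale = 0.2f;
  const int zero_point = -3;
  const int8_t values[] = {-3, 0, 7};
  for (int8_t q : values) {
    // tfl.dequantize on a uniform quantized type: real = scale * (q - zp).
    std::printf("q=%d -> %f\n", q, scale * (q - zero_point));
  }
}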
tensorflow/compiler/mlir/tensorflow/tests/lower_quantized.mlir
// RUN: tf-opt %s -test-tf-lower-tf | FILECHECK_OPTS="" FileCheck %s

// CHECK-LABEL: dequantize
func.func @dequantize(%arg0: tensor<2x3x!tf_type.qint8>, %min_range: tensor<f32>, %max_range: tensor<f32>) -> tensor<2x3xf32> {
  // CHECK-DAG: %[[HALF_RANGE:.*]] = "tf.Const"() <{value = dense<1.280000e+02> : tensor<f32>}>
  // CHECK-DAG: %[[C255:.*]] = "tf.Const"() <{value = dense<2.550000e+02> : tensor<f32>}>
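The two constants suggest the lowering formula for tf.Dequantize on qint8 in its default mode: shift the stored value into [0, 255] by adding the half range 128, then rescale into [min_range, max_range]. A sketch of that reading (my inference from the constants, not a copy of the pass):

#include <cstdint>
#include <cstdio>

// tf.Dequantize (MIN_COMBINED, qint8): shift by the half range, then rescale.
float DequantizeMinCombined(int8_t q, float min_range, float max_range) {
  const float kHalfRange = 128.0f;  // the HALF_RANGE constant in the test
  const float kRange = 255.0f;      // the C255 constant in the test
  return min_range + (q + kHalfRange) * (max_range - min_range) / kRange;
}

int main() {
  // -128 maps to min_range, 127 maps to max_range.
  std::printf("%f %f\n", DequantizeMinCombined(-128, -1.0f, 1.0f),
              DequantizeMinCombined(127, -1.0f, 1.0f));
}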
tensorflow/compiler/mlir/lite/transforms/quantize_patterns.td
include "tensorflow/compiler/mlir/lite/ir/tfl_ops.td" // Quantize attribute $0 by using quantization parameter from %1. def QuantizeByQuantizedType : NativeCodeCall<"quant::Quantize($0, $1.getValue())">; def F32ElementsAttr : ElementsAttrBase< CPred<"$_self.cast<ElementsAttr>().getShapedType().getElementType().isF32()">, "float constant tensor">; // Squash tfl.dequantize and tfl.quantize pairs.
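The "squash" rule referenced here removes a dequantize that is immediately re-quantized with the same parameters, since the pair leaves every stored value unchanged. A sketch of why that is safe:

#include <cassert>
#include <cmath>
#include <cstdint>

// Dequantize then re-quantize with identical parameters.
int8_t RoundTrip(int8_t q, float scale, int zero_point) {
  float real = scale * (q - zero_point);                  // tfl.dequantize
  long requant = std::lround(real / scale) + zero_point;  // tfl.quantize
  return static_cast<int8_t>(requant);
}

int main() {
  // With matching parameters the pair is the identity on every i8 value,
  // which is what makes squashing dequantize/quantize pairs semantics-preserving.
  for (int q = -128; q <= 127; ++q)
    assert(RoundTrip(static_cast<int8_t>(q), 0.2f, -3) == q);
  return 0;
}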
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions.mlir
// CHECK: %[[dequantize:.*]] = "tf.PartitionedCall"(%[[conv_quant]], %[[out_scale]], %[[out_zp]])
// CHECK-SAME: f = @dequantize_i8
// CHECK: %[[conv_float:.*]] = "tf.PartitionedCall"(%arg0, %[[w_float]], %[[b_float]])
// CHECK-SAME: f = @composite_conv2d_with_bias_and_relu6_fn_1
// CHECK: return %[[dequantize]], %[[conv_float]]
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h
// After applying the function, quantize/dequantize functions are created,
// where the body of each function contains a specific quantization algorithm.
// The input of the quantize function has one operand of
// IsValueWithQuantizablePrecision, and the output is a tensor with a supported
// quantized precision (like int8). For the dequantize function, it is the
// other way around.
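Read as signatures, the description amounts to a pair of inverse functions: quantize maps float data to a supported quantized precision, and dequantize maps it back. A schematic sketch of that contract (the names and parameter struct are illustrative, not the generated code):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

struct QuantParams { float scale; int zero_point; };  // assumed parameter shape

// Sketch of a generated "quantize function" body: float in, int8 out.
std::vector<int8_t> QuantizeFn(const std::vector<float>& x, QuantParams p) {
  std::vector<int8_t> q;
  for (float v : x) {
    int r = static_cast<int>(std::lround(v / p.scale)) + p.zero_point;
    q.push_back(static_cast<int8_t>(std::clamp(r, -128, 127)));
  }
  return q;
}

// Sketch of a generated "dequantize function" body: the other way around.
std::vector<float> DequantizeFn(const std::vector<int8_t>& q, QuantParams p) {
  std::vector<float> x;
  for (int8_t v : q) x.push_back(p.scale * (v - p.zero_point));
  return x;
}

int main() {
  QuantParams p{0.1f, 0};
  auto q = QuantizeFn({0.1f, -0.35f, 1.0f}, p);
  auto x = DequantizeFn(q, p);  // approximately recovers the inputs
  return x.size() == 3 ? 0 : 1;
}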
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h
// Inserts the Quantize ops for requantizing a block argument.
void RequantizeArg(BlockArgument arg, RequantizeStates& states);

// Inserts the Quantize and Dequantize ops to quantize the value and returns
// the Quantize op.
void RequantizeValue(Value value, RequantizeStates& states, Location loc);

// Returns the quantization parameter that satisfies the same scale
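Requantizing means rewriting a quantized value from one set of parameters to another, which the driver does by inserting Quantize (and Dequantize) ops. The underlying arithmetic, sketched with assumed parameters:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Requantize q from (s1, z1) to (s2, z2): dequantize, then quantize again.
int8_t Requantize(int8_t q, float s1, int z1, float s2, int z2) {
  float real = s1 * (q - z1);  // what a Dequantize op would produce
  int r = static_cast<int>(std::lround(real / s2)) + z2;  // the Quantize op
  return static_cast<int8_t>(std::clamp(r, -128, 127));
}

int main() {
  // The same real value 2.0 under two parameter sets: (0.1, 0) -> (0.25, 10).
  std::printf("%d\n", Requantize(20, 0.1f, 0, 0.25f, 10));  // prints 18
}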