Results 41 - 50 of 131 for dequantize (0.16 sec)
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
llvm::cl::desc("Whether to enable per-channel quantized weights.")};
};

// If the weight is applicable to dynamic range quantization, insert Quantize
// and Dequantize ops with per-tensor scale.
class PrepareDRQQuantizableOp : public OpRewritePattern<arith::ConstantOp> {
 public:
  explicit PrepareDRQQuantizableOp(MLIRContext* context,
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes
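The snippet above is cut off mid-constructor. As a minimal sketch of the pattern shape it uses (the struct name and the weight check are illustrative assumptions, not the actual TensorFlow implementation):

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

// Hypothetical skeleton of an OpRewritePattern over constants, in the style
// of PrepareDRQQuantizableOp above. The real pass additionally checks that
// the constant feeds an op eligible for dynamic range quantization.
struct InsertQdqOnWeight : public OpRewritePattern<arith::ConstantOp> {
  using OpRewritePattern<arith::ConstantOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(arith::ConstantOp op,
                                PatternRewriter &rewriter) const override {
    auto type = dyn_cast<RankedTensorType>(op.getType());
    // Only float weights are candidates for quantize/dequantize insertion.
    if (!type || !type.getElementType().isF32()) return failure();
    // ... a real pattern would insert per-tensor Quantize and Dequantize
    // ops here and rewrite the uses ...
    return failure();  // Sketch only: no rewrite is performed.
  }
};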
tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir
^bb0(%arg0: tensor<1x2xf32>):
  %cst_0 = arith.constant dense<[1, 0]> : tensor<2xi32>
  %0 = "tfl.quantize"(%arg0) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.0>>} : (tensor<1x2xf32>) -> (tensor<1x2x!quant.uniform<u8:f32, 1.0>>)
  %1 = "tfl.dequantize"(%0) : (tensor<1x2x!quant.uniform<u8:f32, 1.0>>) -> (tensor<1x2xf32>)
  %2 = "tf.Transpose"(%1, %cst_0) : (tensor<1x2xf32>, tensor<2xi32>) -> tensor<2x1xf32>
Last Modified: Wed May 29 07:26:59 UTC 2024 - 59.8K bytes
tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc
// If `value` is produced by tf.Dequantize op, then return the Dequantize op's
// input. Otherwise return `value`.
auto get_real_input_value = [](Value value) -> Value {
  Operation* defining_op = value.getDefiningOp();
  if (auto dequantize = dyn_cast_or_null<TF::DequantizeOp>(defining_op)) {
    return dequantize.getInput();
  } else if (auto dequantize =
Last Modified: Mon May 20 20:06:54 UTC 2024 - 45.2K bytes
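The lambda above is truncated at its second branch (which in the source continues to another dequantize-like op). A self-contained sketch of the first, complete branch:

#include "mlir/IR/Value.h"
#include "llvm/Support/Casting.h"

// Sketch: if `value` is produced by a tf.Dequantize op, look through it and
// return that op's input; otherwise return `value` unchanged. TF::DequantizeOp
// is provided by the TensorFlow MLIR dialect headers.
mlir::Value GetRealInputValue(mlir::Value value) {
  mlir::Operation* defining_op = value.getDefiningOp();
  if (auto dequantize =
          llvm::dyn_cast_or_null<mlir::TF::DequantizeOp>(defining_op))
    return dequantize.getInput();
  return value;
}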
tensorflow/compiler/mlir/lite/transforms/quantize_variables.cc
if (!read_variable_op) continue;

// Add dequantize.
builder.setInsertionPointAfter(read_variable_op);
auto new_read_variable_op = builder.create<ReadVariableOp>(
    read_variable_op.getLoc(), ref_qtype, read_variable_op.getResourceId());
auto new_dq_op = builder.create<DequantizeOp>(
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.5K bytes
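A hedged sketch of the insertion idiom the pass uses, assuming MLIR's OpBuilder and the TFL dialect's DequantizeOp; the helper name and signature are illustrative:

#include "mlir/IR/Builders.h"

// Position the builder directly after `op`, then create a dequantize whose
// input is the op's first result. The real pass first re-creates the
// ReadVariableOp with a quantized reference type, as shown above.
void InsertDequantizeAfter(mlir::OpBuilder& builder, mlir::Operation* op,
                           mlir::Type float_type) {
  builder.setInsertionPointAfter(op);
  builder.create<mlir::TFL::DequantizeOp>(op->getLoc(), float_type,
                                          op->getResult(0));
}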
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc
      per_axis_type.getStorageTypeMin(), per_axis_type.getStorageTypeMax());
}
auto quantize = builder.create<quantfork::QuantizeCastOp>(
    q_op.getLoc(), new_value_type.clone(new_qtype), new_value);
auto dequantize = builder.create<quantfork::DequantizeCastOp>(
    dq_op.getLoc(), new_value_type, quantize.getResult());
return dequantize.getResult();
}
Last Modified: Fri May 17 17:58:54 UTC 2024 - 13.3K bytes
tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.td
* A tensor is dequantized using a `func::FuncOp` whose name contains
  "uniform_dequantize". The first argument is the tensor to be dequantized,
  the second argument is the zero point constant (element type: int) and the
  third argument is the inverse scale constant (element type: float).
* Inputs to the target quantized op are quantized and the outputs are
  dequantized.
Last Modified: Thu Apr 25 21:59:06 UTC 2024 - 5.6K bytes
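Under this convention, a dequantizing call can be recognized purely by the callee's symbol name. A minimal sketch (the helper is an illustrative assumption, not part of the pass):

#include "mlir/Dialect/Func/IR/FuncOps.h"

// True if `call_op` invokes a dequantizing function under the naming
// convention described above, i.e. a callee whose name contains
// "uniform_dequantize" and that takes (tensor, zero point, inverse scale).
bool IsUniformDequantizeCall(mlir::func::CallOp call_op) {
  return call_op.getCallee().contains("uniform_dequantize");
}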
tensorflow/compiler/mlir/lite/transforms/passes.td
];
}

def DecomposeHybridQuantizationPass
    : Pass<"tfl-decompose-hybrid-quantization", "mlir::func::FuncOp"> {
  let summary = "Decomposes hybrid quantization to explicit quantize / dequantize";
  let description = [{
    Decomposes (with explicit quantize/dequantize ops) selected math
    operations which exist in the model with hybrid quantization
    (some arguments/results left in floating point).
  }];
Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 22.6K bytes
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc
  op->erase();
});
}

// Fold quantized i32 (normally bias) into their float values.
struct FoldQuantizedI32ToFloat : public OpRewritePattern<TFL::DequantizeOp> {
  using OpRewritePattern<TFL::DequantizeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(TFL::DequantizeOp dequant_op,
                                PatternRewriter& rewriter) const override {
    // We only fold i32 -> float pattern.
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes
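A sketch of the "only fold i32 -> float" guard this pattern applies, assuming MLIR's quantized type API; the helper itself is illustrative:

#include "mlir/Dialect/Quant/QuantTypes.h"

// Quantized i32 values are normally biases; only those are folded back into
// float constants.
bool IsFoldableI32Dequantize(mlir::quant::QuantizedType qtype) {
  return qtype && qtype.getStorageTypeIntegralWidth() == 32 &&
         qtype.getExpressedType().isF32();
}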
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h
// whose storage type is 32-bit integer and expressed type is f32.
bool IsI32F32UniformQuantizedPerAxisType(Type type);

// Determines whether the storage type of a quantized type is supported by
// `tfl.quantize` or `tfl.dequantize` ops. ui8, i8 and i16 are supported.
bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type);

// Returns true if a type is quantized tensor type.
bool IsQuantizedTensorType(Type type);
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.7K bytes
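The header only declares this predicate. A plausible implementation, inferred from the comment (ui8, i8 and i16 supported) and from the ui16/i32 rejections in the test file below; this is an assumption, not the actual TensorFlow source:

#include "mlir/IR/BuiltinTypes.h"

// Assumed check: 8-bit storage of either signedness (ui8, i8) and signed
// 16-bit storage (i16) are supported; ui16 and anything wider are not.
bool IsSupportedByTfliteQuantizeOrDequantizeOps(mlir::IntegerType storage_type) {
  const unsigned width = storage_type.getWidth();
  return width == 8 || (width == 16 && !storage_type.isUnsigned());
}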
tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir
}

// CHECK-LABEL: uniform_dequantize_op_ui16_storage_input
// CHECK: stablehlo.uniform_dequantize
// CHECK-NOT: tfl.dequantize

// -----

// Tests that the pattern doesn't match when the input quantized tensor's
// storage type is i32. i32 storage type is not compatible with
// `tfl.dequantize`.
func.func @uniform_dequantize_op_i32_storage_input(%arg: tensor<2x2x!quant.uniform<i32:f32, 1.000000e+0:8>>) -> tensor<2x2xf32> {
Last Modified: Tue May 14 17:10:32 UTC 2024 - 106.2K bytes