Results 61 - 70 of 178 for dequantize (0.21 sec)
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc
      per_axis_type.getStorageTypeMin(), per_axis_type.getStorageTypeMax());
  }
  auto quantize = builder.create<quantfork::QuantizeCastOp>(
      q_op.getLoc(), new_value_type.clone(new_qtype), new_value);
  auto dequantize = builder.create<quantfork::DequantizeCastOp>(
      dq_op.getLoc(), new_value_type, quantize.getResult());
  return dequantize.getResult();
}
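The snippet above rebuilds an explicit quantize -> dequantize pair around a value with a rewritten quantized type. Numerically, such a pair is the standard uniform fake-quantization round trip; the standalone sketch below shows that arithmetic, assuming uniform affine quantization (all names are illustrative, not from this pass):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Quantize then immediately dequantize: x is rounded onto the integer
    // grid, clamped to the storage range, and mapped back to float.
    float FakeQuantize(float x, float scale, int32_t zero_point,
                       int32_t storage_min, int32_t storage_max) {
      int32_t q = static_cast<int32_t>(std::lround(x / scale)) + zero_point;
      q = std::clamp(q, storage_min, storage_max);        // quantize
      return scale * static_cast<float>(q - zero_point);  // dequantize
    }
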
tensorflow/compiler/mlir/lite/transforms/passes.td
];
}

def DecomposeHybridQuantizationPass
    : Pass<"tfl-decompose-hybrid-quantization", "mlir::func::FuncOp"> {
  let summary = "Decomposes hybrid quantization to explicit quantize / dequantize";
  let description = [{
    Decomposes (with explicit quantize/dequantize ops) selected math
    operations which exist in the model with hybrid quantization
    (some arguments/results left in floating point).
  }];
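In plain terms, a hybrid op (e.g. float activations with i8 weights) is rewritten so the quantized operands are explicitly dequantized to float before the math op runs. A hypothetical standalone sketch of the weight-side arithmetic (names are mine, not from the pass):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Explicitly dequantize i8 weights so a plain float kernel can consume
    // them, assuming uniform quantization: w = scale * (q - zero_point).
    std::vector<float> DequantizeWeights(const std::vector<int8_t>& q,
                                         float scale, int32_t zero_point) {
      std::vector<float> w(q.size());
      for (std::size_t i = 0; i < q.size(); ++i)
        w[i] = scale * static_cast<float>(q[i] - zero_point);
      return w;
    }

After the decomposition, kernels that lack hybrid (dynamic-range) support can still execute the model, at the cost of materializing float weights.
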
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc
op->erase(); }); } // Fold quantized i32 (normally bias) into their float values. struct FoldQuantizedI32ToFloat : public OpRewritePattern<TFL::DequantizeOp> { using OpRewritePattern<TFL::DequantizeOp>::OpRewritePattern; LogicalResult matchAndRewrite(TFL::DequantizeOp dequant_op, PatternRewriter& rewriter) const override { // We only fold i32 -> float pattern.
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h
// whose storage type is 32-bit integer and expressed type is f32.
bool IsI32F32UniformQuantizedPerAxisType(Type type);

// Determines whether the storage type of a quantized type is supported by
// `tfl.quantize` or `tfl.dequantize` ops. ui8, i8 and i16 are supported.
bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type);

// Returns true if a type is quantized tensor type.
bool IsQuantizedTensorType(Type type);
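A minimal sketch of how such a predicate could be written against the MLIR C++ API, assuming only the supported-type list stated in the comment (this is not the actual implementation):

    #include "mlir/IR/BuiltinTypes.h"  // mlir::IntegerType

    // Accept 8-bit storage of either signedness plus signed 16-bit,
    // per the "ui8, i8 and i16 are supported" comment above.
    bool IsSupportedByTfliteQuantizeOrDequantizeOps(
        mlir::IntegerType storage_type) {
      const unsigned width = storage_type.getWidth();
      if (width == 8) return true;                       // i8 or ui8
      return width == 16 && !storage_type.isUnsigned();  // i16, not ui16
    }
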
tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir
}

// CHECK-LABEL: uniform_dequantize_op_ui16_storage_input
// CHECK: stablehlo.uniform_dequantize
// CHECK-NOT: tfl.dequantize

// -----

// Tests that the pattern doesn't match when the input quantized tensor's
// storage type is i32. i32 storage type is not compatible with
// `tfl.dequantize`.
func.func @uniform_dequantize_op_i32_storage_input(%arg: tensor<2x2x!quant.uniform<i32:f32, 1.000000e+0:8>>) -> tensor<2x2xf32> {
tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc
returned_type = quant::ConvertSignedQuantizedToUnsigned(
    dequantize_input.getType(), dequantize_op.getLoc());
// replace the dequantize op by a quantize op
TypeAttr type_attr = TypeAttr::get(returned_type);
auto quantize_op = builder.create<QuantizeOp>(
    dequantize_op.getLoc(), returned_type, dequantize_input, type_attr);
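For 8-bit uniform quantization, converting signed to unsigned storage shifts both the stored values and the zero point by 128, so the represented real values scale * (q - zero_point) are unchanged. A standalone illustration of that invariant (not the MLIR helper itself):

    #include <cstdint>

    struct QParams { float scale; int32_t zero_point; };

    // i8 -> ui8: shift value and zero point together by 128;
    // scale * (q - zero_point) is preserved exactly.
    uint8_t ToUnsigned(int8_t q) { return static_cast<uint8_t>(q + 128); }
    QParams ToUnsigned(QParams p) { return {p.scale, p.zero_point + 128}; }
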
tensorflow/compiler/mlir/lite/common/tfl_pass_config.h
// have side effects e.g. reduced flatbuffer size. Only certain type
// conversions are supported.
bool reduce_type_precision = false;

// Whether to consider this model a quantized model with quantize/dequantize
// ops and to convert kernels to quantized kernels wherever appropriate.
quant::QDQConversionMode qdq_conversion_mode =
    quant::QDQConversionMode::kQDQNone;
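A hedged sketch of setting these flags before running the converter passes; the field names and the kQDQNone default come from the snippet above, while the construction of the config object is elided and may differ in practice:

    // Assumed context: inside the TFLite converter, with tfl_pass_config.h
    // included. Constructor arguments are elided (/*...*/) on purpose.
    mlir::TFL::PassConfig pass_config(/*...*/);
    pass_config.reduce_type_precision = true;  // opt into size-reducing casts
    // Leave QDQ handling off unless the model really carries Q/DQ pairs.
    pass_config.qdq_conversion_mode = quant::QDQConversionMode::kQDQNone;
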
tensorflow/compiler/mlir/lite/transforms/dense_to_sparse.cc
auto* inst = value.getDefiningOp();
if (!inst) {
  continue;
}
// There could be a Dequantize op after the weight tensor in cases like
// fp16 post-training quantization. We need to get the weight from the
// input of the Dequantize op.
if (isa<DequantizeOp>(inst)) {
  op = inst;
  value = inst->getOperand(0);
  inst = value.getDefiningOp();
  if (!inst) {
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td
left as is for weight-only, which means the weight is dequantized at
runtime. For example, if the kernel does not support dynamic range
quantization, the graph will be converted into the following IR:

  %q_w = "tfl.pseudo_qconst"() {
      qtype = tensor<64x3x3x3x!quant.uniform<i8<-127:127>:f32, 1.000000e+00>>
  %w = "tfl.dequantize"(%q_w) :
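A worked instance of that runtime dequantize: the qtype above uses the symmetric i8<-127:127> storage range (zero point 0) with scale 1.0, so each float weight is just scale * q. A standalone sketch:

    #include <cstdint>

    // Runtime weight-only dequantize for a symmetric i8 qtype (zero point 0).
    float DequantizeWeight(int8_t q, float scale) {
      return scale * static_cast<float>(q);  // scale 1.0: q = -5 -> -5.0f
    }
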
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 8.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc
if (!combined_scale_constant_op) {
  LLVM_DEBUG(llvm::dbgs()
             << "Failed to match combined_scale_constant_op.\n");
  return failure();
}

// Quantize -> Dequantize following r3.
auto output_uniform_quantize_call_op = dyn_cast_or_null<func::CallOp>(
    *combined_scale_multiply_op.getResult().user_begin());
if (!output_uniform_quantize_call_op->hasOneUse()) {