- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 2 of 2 for AffineQuantizedOpInterface (0.36 sec)
-
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
int bit_width = quant_specs_.GetQuantizationTypeWidth(); Operation* quantize_op = quant_op.first; int quantize_operand_num = quant_op.second; auto affine_user = dyn_cast<AffineQuantizedOpInterface>(quantize_op); bool op_with_per_axis_support = false; if (!llvm::dyn_cast_or_null<CustomOp>(quantize_op)) { bool op_with_narrow_range = affine_user &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
// FC and Conv* ops. Restriction for the weight can be relaxed if there are // needs for adjusting scale of variable weights. auto affine_op = dyn_cast<AffineQuantizedOpInterface>(op); auto bias_op = op->getOperand(bias_index).getDefiningOp<arith::ConstantOp>(); if (!affine_op || !bias_op || input_indices.size() != 2) return false;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0)