Results 1 - 6 of 6 for GetQuantizationTypeWidth
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
    return true;
  }
  OpBuilder builder(func);
  bool is_signed = quant_specs_.IsSignedInferenceType();
  IntegerAttr num_bits =
      builder.getI32IntegerAttr(quant_specs_.GetQuantizationTypeWidth());
  BoolAttr narrow_range = builder.getBoolAttr(false);
  auto add_quantize_op = [&](Location loc, Type input_type, Block* block,
                             Block::iterator insertion_point, Value arg,
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.2K bytes
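The pattern in this snippet recurs throughout these passes: the quantization width from QuantizationSpecs is packaged as MLIR attributes for the quantize op. A minimal standalone sketch of just that step follows (the context setup, the hard-coded width of 8, and main() are assumptions for illustration, not TensorFlow code):

#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/MLIRContext.h"

int main() {
  mlir::MLIRContext context;
  mlir::OpBuilder builder(&context);

  // Stand-in for quant_specs_.GetQuantizationTypeWidth(); 8 corresponds to an
  // int8/uint8 inference type.
  const int bit_width = 8;

  // Same builder calls as in prepare_quantize.cc: wrap the width and the
  // narrow-range flag as attributes for a quantize op to consume.
  mlir::IntegerAttr num_bits = builder.getI32IntegerAttr(bit_width);
  mlir::BoolAttr narrow_range = builder.getBoolAttr(false);

  (void)num_bits;
  (void)narrow_range;
  return 0;
}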
tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
    return true;
  }
  OpBuilder builder(func);
  bool is_signed = quant_specs_.IsSignedInferenceType();
  IntegerAttr num_bits =
      builder.getI32IntegerAttr(quant_specs_.GetQuantizationTypeWidth());
  BoolAttr narrow_range = builder.getBoolAttr(false);
  auto add_quantize_op = [&](Location loc, Type input_type, Block* block,
                             Block::iterator insertion_point, Value arg,
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
  const bool is_narrow_range = true;
  const bool is_legacy_float = quant_specs_.legacy_float_scale;
  const bool is_signed = quant_specs_.IsSignedInferenceType();
  const int bit_width = quant_specs_.GetQuantizationTypeWidth();
  std::unique_ptr<OpQuantSpec> spec = GetTFOpQuantSpec(quantized_op);
  const int quant_dim = spec->coeff_op_quant_dim[weight_idx];
  const bool is_per_channel_quantization =
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h
      return false;
    default:
      return true;
  }
}

// Gets the width of this quantization type. Returns 0 if it isn't a
// quantization type.
int64_t GetQuantizationTypeWidth() const {
  switch (inference_type) {
    case tensorflow::DT_INT8:
    case tensorflow::DT_UINT8:
    case tensorflow::DT_QINT8:
    case tensorflow::DT_QUINT8:
      return 8;
Last Modified: Wed Mar 13 10:16:19 UTC 2024 - 10.8K bytes
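The snippet above is cut off mid-switch, but together with its doc comment it pins down the function's contract. Below is a self-contained sketch of that mapping; the InferenceType enum is a hypothetical stand-in for tensorflow::DataType, and the 16-bit case is an extrapolation from the visible 8-bit cases:

#include <cstdint>

// Hypothetical stand-in for tensorflow::DataType; the real function switches
// on QuantizationSpecs::inference_type.
enum class InferenceType { kInt8, kUInt8, kQInt8, kQUInt8, kInt16, kFloat32 };

// Mirrors the documented contract: the width of the quantization type, or 0
// if the inference type is not a quantized type. Only the 8-bit cases appear
// in the snippet; the 16-bit case is an assumption.
int64_t GetQuantizationTypeWidth(InferenceType inference_type) {
  switch (inference_type) {
    case InferenceType::kInt8:
    case InferenceType::kUInt8:
    case InferenceType::kQInt8:
    case InferenceType::kQUInt8:
      return 8;
    case InferenceType::kInt16:
      return 16;
    default:
      return 0;  // e.g. kFloat32: not a quantization type
  }
}

int main() {
  return GetQuantizationTypeWidth(InferenceType::kQInt8) == 8 ? 0 : 1;
}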
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
  bool is_narrow_range = true;
  bool is_legacy_float = quant_specs_.legacy_float_scale;
  bool is_signed = quant_specs_.IsSignedInferenceType();
  int bit_width = quant_specs_.GetQuantizationTypeWidth();
  Operation* quantize_op = quant_op.first;
  int quantize_operand_num = quant_op.second;
  auto affine_user = dyn_cast<AffineQuantizedOpInterface>(quantize_op);
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes
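The three values gathered here (bit_width, is_signed, is_narrow_range) are exactly what is needed to derive the representable integer range of the target type. The helper below is hypothetical, not from the TensorFlow sources, but shows the standard computation these flags feed into:

#include <cstdint>
#include <iostream>

// Hypothetical helper (not in the TensorFlow sources): derives the quantized
// integer range from the bit width, signedness, and narrow-range flag pulled
// from QuantizationSpecs in the snippet above. Narrow range drops the lowest
// value so the signed range is symmetric, e.g. [-127, 127] instead of
// [-128, 127] for 8 bits.
void QuantizedRange(int bit_width, bool is_signed, bool is_narrow_range,
                    int64_t& qmin, int64_t& qmax) {
  if (is_signed) {
    qmin = -(int64_t{1} << (bit_width - 1)) + (is_narrow_range ? 1 : 0);
    qmax = (int64_t{1} << (bit_width - 1)) - 1;
  } else {
    qmin = is_narrow_range ? 1 : 0;
    qmax = (int64_t{1} << bit_width) - 1;
  }
}

int main() {
  int64_t qmin = 0, qmax = 0;
  QuantizedRange(/*bit_width=*/8, /*is_signed=*/true, /*is_narrow_range=*/true,
                 qmin, qmax);
  std::cout << qmin << ", " << qmax << "\n";  // prints: -127, 127
  return 0;
}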
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
                  const quant::QuantizationSpecs& quant_specs)
      : ConvertOpStatsToQDQs<SourceOp>(context, quant_specs),
        activation_number_of_bits_(quant_specs.GetQuantizationTypeWidth()) {}

  LogicalResult matchAndRewrite(SourceOp op,
                                PatternRewriter& rewriter) const override {
    operator_property::OpVariant lstm_variant;
Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes