- Sort Score
- Result 10 results
- Languages All
Results 31 - 40 of 108 for Quantized (0.18 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm.py
values greater than quant_max are converted to 2^num_bits - 1. Args: quant_min: The minimum real value that can be represented by a quantized value. quant_max: The maximum real value that can be represented by a quantized value. Returns: (error, quant_min, quant_max): Tuple of weighted mean squared error.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 11 19:29:56 UTC 2024 - 14.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td
"32-bit quantized integer">; def TF_Quint8 : AnyTypeOf< [TF_TensorFlowType<"Quint8", "quint8">, TF_Quint8Ref], "8-bit quantized unsigned integer">; def TF_Quint16 : AnyTypeOf< [TF_TensorFlowType<"Quint16", "quint16">, TF_Quint16Ref], "16-bit quantized unsigned integer">; // Any quantized type def TF_Quantized : AnyTypeOf< [TF_Qint8, TF_Qint16, TF_Qint32, TF_Quint8, TF_Quint16], "quantized">;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 30.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/quantize.cc
static bool IsQuantizableCustomOp(Operation* op, const quant::CustomOpMap& custom_op_map) { // In some cases, ops may need to be quantized even though their op trait is // not quantizable. For example, for the case of custom op various ops can // be categorized as custom ops despite each of them may require different
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc
clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"), clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED", "Uses TF Uniform Quantized ops"))}; Option<QuantMethod> quantization_method_{ *this, "quantization-method", llvm::cl::init(tensorflow::quantization::QuantizationMethod:: METHOD_STATIC_RANGE_INT8),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
!= _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8 ): raise ValueError( 'StableHLO quantized opset currently only supports static range' ' quantization and weight-only quantization via TF Quantizer.' ) # Set `force_graph_mode_calibration` to True to avoid skipping op execution, # which are not connected to return ops, during calibration execution.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
// The following two passes find specific uniform quantization patterns in // StableHLO and convert them to TFLite ops that accept or produce uniform // quantized types. They only target a specific set of models that contain // "decomposed" quantized ops produced from the framework level. This is why // they are placed right after the `LegalizeTFXlaCallModuleToStablehloPass`
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 25.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_ops_to_mhlo.cc
return success(); } }; // UniformDequantizeOp takes TF quantized types as input which would have been // converted to the mhlo quantized types. Use OpConversionPattern in order to // retrieve the operand type *after* conversion, using OpAdaptor operand // accessor. // Same for other Uniform Quant Ops that take TF quantized types as input. class ConvertUniformDequantizeOp
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 30.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc
per_axis_type.getStorageTypeMin(), per_axis_type.getStorageTypeMax()); } auto quantize = builder.create<quantfork::QuantizeCastOp>( q_op.getLoc(), new_value_type.clone(new_qtype), new_value); auto dequantize = builder.create<quantfork::DequantizeCastOp>( dq_op.getLoc(), new_value_type, quantize.getResult()); return dequantize.getResult(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/passes.td
"quantfork::QuantizationForkDialect" ]; let options = [ ListOption<"quantize_allowlist_", "quantize-allowlist", "std::string", "comma separated list of allowlisted functions to be quantized. Only used in tests">, Option<"quantize_signed_", "quantize-signed", "bool", "false", "signed inference type. Only used in tests">,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 22.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_patterns.td
class UsedBy<string op> : Constraint< CPred<"llvm::isa<mlir::TFL::" # op # "Op>(*$0.getUsers().begin())">>; // When the op is passing-through, the output types of the quantized ops need // to be updated as well. Since the quantize op manages its own type by the // "qtype" attribute, we should update the type shape in this attribute. def ReorderTransposeDequantQuant : Pat<(TF_TransposeOp:$old_value
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 10.5K bytes - Viewed (0)