Results 21 - 30 of 91 for Quantized (0.13 sec)
- tensorflow/compiler/mlir/tfr/passes/decompose.cc
    std::min(quantized, static_cast<int>(std::numeric_limits<int8_t>::max()));
    quantized = std::max(quantized, static_cast<int>(std::numeric_limits<int8_t>::min()));
    return builder.getI32IntegerAttr(quantized);
    }

    // Decompose the TF ops with the registered composition library.
    class DecomposeTFOpsPass
        : public PassWrapper<DecomposeTFOpsPass, OperationPass<func::FuncOp>> {
     public:
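  The std::min/std::max pair above is the usual saturation step of int8 affine quantization. A minimal standalone sketch of that pattern (the function name and the scale/zero-point parameters are illustrative, not the TFR helper's actual signature):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>
    #include <limits>

    // Quantize a real value with a given scale/zero point, then saturate the
    // result to the int8 storage range, mirroring the clamp in decompose.cc.
    int32_t QuantizeToInt8(float real_value, float scale, int32_t zero_point) {
      int32_t quantized =
          static_cast<int32_t>(std::round(real_value / scale)) + zero_point;
      quantized = std::min(
          quantized, static_cast<int32_t>(std::numeric_limits<int8_t>::max()));
      quantized = std::max(
          quantized, static_cast<int32_t>(std::numeric_limits<int8_t>::min()));
      return quantized;
    }

    int main() {
      std::cout << QuantizeToInt8(3.7f, 0.02f, 0) << "\n";   // 185 -> clamped to 127
      std::cout << QuantizeToInt8(-5.0f, 0.02f, 0) << "\n";  // -250 -> clamped to -128
    }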
- tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc
    // asymmetric range. For a state tensor, assigning correct quantization
    // parameters is sufficient, and for constants with asymmetric range it's
    // not correctly quantized by legacy quantizer so call the new Quantize.
    return Quantize(real_value, tensor_type);
    } else if (width == 16) {
      if (const auto uniform_type = dyn_cast<UniformQuantizedType>(q_type)) {
        const auto quantized_values =
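  The comment above distinguishes symmetric and asymmetric real ranges. As a rough, self-contained illustration of how the affine parameters differ between the two cases (the struct and the ChooseParams name are assumptions, not the MLIR quantization API):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>

    struct QuantParams {
      double scale;
      int64_t zero_point;
    };

    QuantParams ChooseParams(double rmin, double rmax, bool symmetric) {
      const int64_t qmin = -128, qmax = 127;
      rmin = std::min(rmin, 0.0);  // the range must contain 0.0 so it is exactly representable
      rmax = std::max(rmax, 0.0);
      if (symmetric) {
        // Symmetric: zero point fixed at 0, scale covers the larger magnitude.
        const double bound = std::max(std::abs(rmin), std::abs(rmax));
        return {bound / qmax, 0};
      }
      // Asymmetric: use the full range and shift by a zero point.
      const double scale = (rmax - rmin) / static_cast<double>(qmax - qmin);
      const int64_t zp = qmin - static_cast<int64_t>(std::round(rmin / scale));
      return {scale, zp};
    }

    int main() {
      const QuantParams p = ChooseParams(-0.5, 3.5, /*symmetric=*/false);
      std::cout << "scale=" << p.scale << " zero_point=" << p.zero_point << "\n";
    }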
- tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.cc
    attrs.push_back(rewriter.getNamedAttr(
        attr_minmax, rewriter.getI64IntegerAttr(quant_val)));
      }
    }
    return success();
    }

    // This LogicalResult covers both the hybrid and fully quantized op cases.
    LogicalResult FillAttributesForUniformQuantizedDotOp(
        PatternRewriter& rewriter, Operation* op,
        llvm::StringMap<Attribute>& identifier_to_attr,
- tensorflow/compiler/mlir/lite/utils/const_tensor_utils.cc
    if (!mlir::isa<mlir::IntegerType>(raw_elem_type)) {
      return absl::InvalidArgumentError(
          "Quantized tensors must be stored as integers");
    }
    storage_type = mlir::cast<mlir::IntegerType>(raw_elem_type);
    }

    // TFlite uses narrow-range [u]int8 for constant buffers of quantized weights.
    // Since we don't know which ones are weights, we represent this optimization
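  The narrow-range convention mentioned in the comment keeps int8 weight values in [-127, 127], so the range is symmetric around zero and -128 never appears. A small standalone sketch of that convention (the function name is illustrative, not the converter's code):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    std::vector<int8_t> QuantizeWeightsNarrowRange(const std::vector<float>& weights,
                                                   float scale) {
      std::vector<int8_t> out;
      out.reserve(weights.size());
      for (float v : weights) {
        int q = static_cast<int>(std::round(v / scale));
        q = std::clamp(q, -127, 127);  // narrow range: -128 is never produced
        out.push_back(static_cast<int8_t>(q));
      }
      return out;
    }

    int main() {
      for (int8_t q : QuantizeWeightsNarrowRange({-1.0f, -0.99f, 0.5f}, 1.0f / 127))
        std::cout << static_cast<int>(q) << " ";  // prints: -127 -126 64
      std::cout << "\n";
    }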
- tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model.cc
    src_saved_model_path, signature_keys, tags, quantization_options);
    if (!exported_model.ok()) return exported_model.status();

    // Remove the `tpu` tag from the debug quantized saved model as it is
    // for CPU. Note the 'tpu' value should be the same as `TPU` defined in
    // tensorflow/python/saved_model/tag_constants.py.
    if (quantization_options.has_debugger_config()) {
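  The fix-up described in the comment amounts to dropping one tag from the exported model's tag set before it is written out. A trivial standalone sketch of that idea (the std::set container and surrounding code are assumptions):

    #include <iostream>
    #include <set>
    #include <string>

    int main() {
      // The debug quantized model is meant to run on CPU, so the "tpu" tag is
      // dropped; the string mirrors `TPU` in tag_constants.py.
      std::set<std::string> tags = {"serve", "tpu"};
      tags.erase("tpu");
      for (const std::string& tag : tags) std::cout << tag << "\n";  // prints: serve
    }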
- tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm.py
    values greater than quant_max are converted to 2^num_bits - 1.

    Args:
      quant_min: The minimum real value that can be represented by a quantized value.
      quant_max: The maximum real value that can be represented by a quantized value.

    Returns:
      (error, quant_min, quant_max): Tuple of weighted mean squared error.
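  The docstring describes scoring a candidate [quant_min, quant_max] range: fake-quantize the observed values, saturate anything above quant_max at 2^num_bits - 1, and accumulate the squared reconstruction error. A rough standalone sketch of that idea (not the calibration_algorithm.py implementation; the weighting is omitted):

    #include <algorithm>
    #include <cmath>
    #include <iostream>
    #include <vector>

    double QuantizationError(const std::vector<double>& values, double quant_min,
                             double quant_max, int num_bits = 8) {
      const double levels = std::pow(2.0, num_bits) - 1.0;  // e.g. 255 for 8 bits
      const double scale = (quant_max - quant_min) / levels;
      double error = 0.0;
      for (double v : values) {
        double q = std::round((v - quant_min) / scale);
        q = std::clamp(q, 0.0, levels);          // saturate at 0 and 2^num_bits - 1
        const double dequant = q * scale + quant_min;
        error += (v - dequant) * (v - dequant);  // squared reconstruction error
      }
      return error / values.size();
    }

    int main() {
      const std::vector<double> acts = {0.1, 0.4, 0.9, 2.5};
      // A tighter range clips 2.5 but represents the small values more finely.
      std::cout << QuantizationError(acts, 0.0, 1.0) << " vs "
                << QuantizationError(acts, 0.0, 4.0) << "\n";
    }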
- tensorflow/compiler/mlir/lite/transforms/quantize.cc
    static bool IsQuantizableCustomOp(Operation* op,
                                      const quant::CustomOpMap& custom_op_map) {
      // In some cases, ops may need to be quantized even though their op trait is
      // not quantizable. For example, in the custom-op case various ops can be
      // categorized as custom ops even though each of them may require different
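  IsQuantizableCustomOp consults a user-provided map keyed by custom op name, since the ops' traits alone cannot say whether they are quantizable. A hedged sketch of that lookup (the CustomOpInfo fields and names are assumptions, not the actual quant::CustomOpMap):

    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct CustomOpInfo {
      std::vector<int> quantizable_input_indices;  // which operands to quantize
      bool is_weight_only = false;
    };

    using CustomOpMap = std::unordered_map<std::string, CustomOpInfo>;

    bool IsQuantizableCustomOp(const std::string& op_name,
                               const CustomOpMap& custom_op_map) {
      // An op is quantizable only if it was explicitly listed in the map.
      return custom_op_map.find(op_name) != custom_op_map.end();
    }

    int main() {
      CustomOpMap custom_ops;
      custom_ops["MyDetectionPostProcess"] = {{0, 1}, /*is_weight_only=*/false};
      std::cout << IsQuantizableCustomOp("MyDetectionPostProcess", custom_ops) << " "
                << IsQuantizableCustomOp("SomeOtherOp", custom_ops) << "\n";  // 1 0
    }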
- tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc
    clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"),
    clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED",
               "Uses TF Uniform Quantized ops"))};

    Option<QuantMethod> quantization_method_{
        *this, "quantization-method",
        llvm::cl::init(tensorflow::quantization::QuantizationMethod::
                           METHOD_STATIC_RANGE_INT8),
- tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
    != _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8
    ):
      raise ValueError(
          'StableHLO quantized opset currently only supports static range'
          ' quantization and weight-only quantization via TF Quantizer.'
      )

    # Set `force_graph_mode_calibration` to True to avoid skipping op execution,
    # which are not connected to return ops, during calibration execution.
- tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
    // The following two passes find specific uniform quantization patterns in
    // StableHLO and convert them to TFLite ops that accept or produce uniform
    // quantized types. They only target a specific set of models that contain
    // "decomposed" quantized ops produced from the framework level. This is why
    // they are placed right after the `LegalizeTFXlaCallModuleToStablehloPass`