- Sort: Score
- Results per page: 10
- Languages: All
Results 31 - 40 of 294 for Quantized (0.18 sec)
-
tensorflow/compiler/mlir/lite/quantization/ir/ConvertConst.cc
/// quantized and the operand type is quantizable. LogicalResult QuantizedConstRewrite::matchAndRewrite( QuantizeCastOp qbarrier, PatternRewriter &rewriter) const { Attribute value; // Is the operand a constant? if (!matchPattern(qbarrier.getArg(), m_Constant(&value))) { return failure(); } // Does the qbarrier convert to a quantized type. This will not be true
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h
bool SetConstantResultParams(Operation* op); // Inserts the Quantize and Dequantize ops after `op`'s `index`-th result. The // quantized element type for the result is `quantized_type`. void QuantizeOpResult(Operation* op, int result_index, QuantizedType quantized_type); // Inserts the Quantize and Dequantize ops after `arg`. The quantized element // type for `arg` is `quantized_type`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 20 11:42:17 UTC 2024 - 16.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc
// (e.g. matmul) has both quantized and unquantized inputs by dequantizing // the quantized inputs, performing the operation in the expressed type, then // requantizing if a quantized output is required. // // The motivation behind these changes is for Dialects that assume only float // or quantized computation, and do not support a mixture of these types on
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
std::optional<const absl::string_view> mlir_dump_file_prefix = std::nullopt); // Converts dequantize-(quantizable) call-quantize pattern to a single call op // that has quantized input and output types. It is expected for this pass to // emit illegal IR with unsupported quantized input and output types. The // pass following immediately after this one will be responsible for legalizing
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h
// and NumericVerify ops to compare output values from the quantized and float // ops. // // When `legacy_float_scale` is true, the quantizer will use float scale instead // of double, and call TOCO's quantization routines to maintain bit-exactness of // the values with the TOCO quantizer. TfLiteStatus QuantizeModel( absl::string_view model_buffer, const tflite::TensorType &input_type,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
// This op has been quantized, so we should not consider it again. if (quantized_.contains(op)) continue; quantized_.insert(op); if (auto constant_op = dyn_cast<arith::ConstantOp>(op); constant_op) { // If the workflow requires inferring ranges from the content // (post-training quantization) and it is weight (filter) and hasn't
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_quantized_functions.cc
// For consistency, we require all quantized composite function to have // the "tf_quant.quantized_ops" attribute. if (!new_func.getSymName().starts_with("quantized_")) continue; if (!new_func->hasAttrOfType<ArrayAttr>("tf_quant.quantized_ops")) { new_func->emitError() << "Missing \"tf_quant.quantized_ops\" " "attribute in the quantized composite function."; signalPassFailure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 8.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
<< " is expected to be quantized with " << tensor_property.number_of_bits << " bits, but got " << num_storage_bits << " bits instead."; return failure(); } continue; // skip if it is already quantized. } quant::UniformQuantizedType qtype;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 91.6K bytes - Viewed (0)