- Sort: Score
- Results per page: 10
- Languages: All
Results 31 - 40 of 279 for quantized (0.15 sec)
-
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
// This op has been quantized, so we should not consider it again. if (quantized_.contains(op)) continue; quantized_.insert(op); if (auto constant_op = dyn_cast<arith::ConstantOp>(op); constant_op) { // If the workflow requires inferring ranges from the content // (post-training quantization) and it is weight (filter) and hasn't
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/report_test.cc
// The quantized call op without the _quantization_method attribute is not // captured as a `QuantizationResult`. ASSERT_THAT(results.results(), IsEmpty()); } TEST_F(QuantizationReportTest, InitializeWithModuleOpWithInvalidCalleeName) { // A quantized dot_general op but the callee function has an invalid name. It // is expected to start with `quantized_`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 10:10:34 UTC 2024 - 18.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h
// and NumericVerify ops to compare output values from the quantized and float // ops. // // When `legacy_float_scale` is true, the quantizer will use float scale instead // of double, and call TOCO's quantization routines to maintain bit-exactness of // the values with the TOCO quantizer. TfLiteStatus QuantizeModel( absl::string_view model_buffer, const tflite::TensorType &input_type,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/ir/ConvertConst.cc
/// quantized and the operand type is quantizable. LogicalResult QuantizedConstRewrite::matchAndRewrite( QuantizeCastOp qbarrier, PatternRewriter &rewriter) const { Attribute value; // Is the operand a constant? if (!matchPattern(qbarrier.getArg(), m_Constant(&value))) { return failure(); } // Does the qbarrier convert to a quantized type. This will not be true
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
std::optional<const absl::string_view> mlir_dump_file_prefix = std::nullopt); // Converts dequantize-(quantizable) call-quantize pattern to a single call op // that has quantized input and output types. It is expected for this pass to // emit illegal IR with unsupported quantized input and output types. The // pass following immediately after this one will be responsible for legalizing
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_quantized_functions.cc
// For consistency, we require all quantized composite function to have // the "tf_quant.quantized_ops" attribute. if (!new_func.getSymName().starts_with("quantized_")) continue; if (!new_func->hasAttrOfType<ArrayAttr>("tf_quant.quantized_ops")) { new_func->emitError() << "Missing \"tf_quant.quantized_ops\" " "attribute in the quantized composite function."; signalPassFailure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 8.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
<< " is expected to be quantized with " << tensor_property.number_of_bits << " bits, but got " << num_storage_bits << " bits instead."; return failure(); } continue; // skip if it is already quantized. } quant::UniformQuantizedType qtype;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
QuantizationUnits& quantizable_ops) const { bool quantized = false; // TODO(b/212514817): refactor mode checking to improve code quality for (auto& quant_op : quantizable_ops) { if (quant_specs_.inference_type == tensorflow::DT_QINT8) { quantized |= quantizeOpAsInt8(rewriter, op, quant_op); } else if (quant_specs_.inference_type == tensorflow::DT_HALF) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 91.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.h
// identifies the `MetaGraphDef`. `quantization_config` determines the behavior // of StableHLO Quantizer. `quantization_py_function_lib` contains python // implementations of certain APIs that are required for calibration. // `module_op` is the input graph to be quantized and it should contain // StableHLO ops. // // Returns a quantized `ModuleOp` in StableHLO, potentially wrapped inside a
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 19 02:44:03 UTC 2024 - 2.7K bytes - Viewed (0)