- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 108 for Quantized (0.18 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h
namespace mlir::quant::stablehlo { // Checks whether an op is connected with a quantized composite function. If // not, the same-scale op will not be quantized. This decision is based on the // current assumption that the performance gain of the same-scale op itself // could not beat the overhead of the quantize and dequantize routines that need // to be added around that op. When the assumption changes, this policy might
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.td
// quantized representation may be acceptable. // // Especially early in transformation, it is common to have pairs of // qcast/dcast at points where a transition to a quantized type is // required. In addition, it is also common to have an identity qcast // (where the operand and result type are not quantized) at all points where // it is legal to use a quantized representation (but is not known to be
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Oct 13 12:46:08 UTC 2022 - 10.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/report_test.cc
// The quantized call op without the _quantization_method attribute is not // captured as a `QuantizationResult`. ASSERT_THAT(results.results(), IsEmpty()); } TEST_F(QuantizationReportTest, InitializeWithModuleOpWithInvalidCalleeName) { // A quantized dot_general op but the callee function has an invalid name. It // is expected to start with `quantized_`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 10:10:34 UTC 2024 - 18.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h
bool SetConstantResultParams(Operation* op); // Inserts the Quantize and Dequantize ops after `op`'s `index`-th result. The // quantized element type for the result is `quantized_type`. void QuantizeOpResult(Operation* op, int result_index, QuantizedType quantized_type); // Inserts the Quantize and Dequantize ops after `arg`. The quantized element // type for `arg` is `quantized_type`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 20 11:42:17 UTC 2024 - 16.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
std::optional<const absl::string_view> mlir_dump_file_prefix = std::nullopt); // Converts dequantize-(quantizable) call-quantize pattern to a single call op // that has quantized input and output types. It is expected for this pass to // emit illegal IR with unsupported quantized input and output types. The // pass following immediately after this one will be responsible for legalizing
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
// This op has been quantized, so we should not consider it again. if (quantized_.contains(op)) continue; quantized_.insert(op); if (auto constant_op = dyn_cast<arith::ConstantOp>(op); constant_op) { // If the workflow requires inferring ranges from the content // (post-training quantization) and it is weight (filter) and hasn't
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
<< " is expected to be quantized with " << tensor_property.number_of_bits << " bits, but got " << num_storage_bits << " bits instead."; return failure(); } continue; // skip if it is already quantized. } quant::UniformQuantizedType qtype;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
QuantizationUnits& quantizable_ops) const { bool quantized = false; // TODO(b/212514817): refactor mode checking to improve code quality for (auto& quant_op : quantizable_ops) { if (quant_specs_.inference_type == tensorflow::DT_QINT8) { quantized |= quantizeOpAsInt8(rewriter, op, quant_op); } else if (quant_specs_.inference_type == tensorflow::DT_HALF) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0)