Results 11 - 20 of 91 for Quantized (0.16 sec)
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.2K bytes - Viewed (0)
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h
bool SetConstantResultParams(Operation* op);

// Inserts the Quantize and Dequantize ops after `op`'s `index`-th result. The
// quantized element type for the result is `quantized_type`.
void QuantizeOpResult(Operation* op, int result_index,
                      QuantizedType quantized_type);

// Inserts the Quantize and Dequantize ops after `arg`. The quantized element
// type for `arg` is `quantized_type`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 20 11:42:17 UTC 2024 - 16.8K bytes - Viewed (0)
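To make the `QuantizeOpResult` contract above concrete, here is a minimal sketch (not the TensorFlow implementation) of how a quantize/dequantize pair can be threaded after an op result with generic MLIR builder APIs; `QuantizeOp` and `DequantizeOp` are hypothetical stand-ins for the dialect-specific cast ops the real driver creates.

    #include "mlir/IR/Builders.h"
    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Value.h"

    // Sketch only: QuantizeOp/DequantizeOp are hypothetical single-result cast ops.
    void QuantizeOpResultSketch(mlir::OpBuilder& builder, mlir::Operation* op,
                                int result_index, mlir::Type quantized_type) {
      mlir::Value result = op->getResult(result_index);
      builder.setInsertionPointAfter(op);

      // Quantize: original float value -> quantized storage type.
      auto quantize =
          builder.create<QuantizeOp>(op->getLoc(), quantized_type, result);
      // Dequantize: back to the original float type for existing users.
      auto dequantize = builder.create<DequantizeOp>(
          op->getLoc(), result.getType(), quantize.getResult());

      // Re-route every original user through the new pair, taking care not to
      // rewire the quantize op's own operand.
      result.replaceAllUsesExcept(dequantize.getResult(), quantize);
    }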
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
    std::optional<const absl::string_view> mlir_dump_file_prefix =
        std::nullopt);

// Converts dequantize-(quantizable) call-quantize pattern to a single call op
// that has quantized input and output types. It is expected for this pass to
// emit illegal IR with unsupported quantized input and output types. The
// pass following immediately after this one will be responsible for legalizing
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0)
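As a rough illustration of the fold that comment describes, the sketch below matches a dequantize -> call -> quantize chain and replaces it with a single call carrying quantized types. It is a hedged approximation, not the actual pass: `QuantizeOp`/`DequantizeOp` are hypothetical cast ops, and single-operand, single-result, single-use calls are assumed for brevity.

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/PatternMatch.h"

    struct FoldDequantizeCallQuantize : public mlir::OpRewritePattern<QuantizeOp> {
      using mlir::OpRewritePattern<QuantizeOp>::OpRewritePattern;

      mlir::LogicalResult matchAndRewrite(
          QuantizeOp quantize, mlir::PatternRewriter& rewriter) const override {
        // Match: quantize(call(dequantize(x))), with the call used only here.
        auto call = quantize.getOperand().getDefiningOp<mlir::func::CallOp>();
        if (!call || call->getNumOperands() != 1 || call->getNumResults() != 1 ||
            !call->hasOneUse())
          return mlir::failure();
        auto dequantize = call->getOperand(0).getDefiningOp<DequantizeOp>();
        if (!dequantize) return mlir::failure();

        // Rewrite: call the callee directly on the quantized value, with the
        // quantized result type. As the pass comment notes, this deliberately
        // produces IR that stays illegal until a later legalization pass runs.
        auto new_call = rewriter.create<mlir::func::CallOp>(
            quantize.getLoc(), call.getCalleeAttr(),
            mlir::TypeRange{quantize.getType()},
            mlir::ValueRange{dequantize.getOperand()});
        rewriter.replaceOp(quantize, new_call.getResults());
        rewriter.eraseOp(call);  // now dead; the dequantize may still have users
        return mlir::success();
      }
    };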
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
// This op has been quantized, so we should not consider it again.
if (quantized_.contains(op)) continue;
quantized_.insert(op);

if (auto constant_op = dyn_cast<arith::ConstantOp>(op); constant_op) {
  // If the workflow requires inferring ranges from the content
  // (post-training quantization) and it is weight (filter) and hasn't
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0)
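The `quantized_` set in this snippet is a standard visited-set guard on a worklist traversal, ensuring each op is processed at most once. A tiny self-contained illustration of the same idiom (plain C++, nothing TensorFlow-specific; `Node` is a made-up stand-in for an op):

    #include <cstdio>
    #include <queue>
    #include <unordered_set>
    #include <vector>

    struct Node {
      int id;
      std::vector<Node*> users;
    };

    // Visits every node reachable from `root` exactly once; `visited` plays
    // the role of `quantized_` in the driver snippet above.
    void Walk(Node* root) {
      std::queue<Node*> worklist;
      std::unordered_set<Node*> visited;
      worklist.push(root);
      while (!worklist.empty()) {
        Node* n = worklist.front();
        worklist.pop();
        // Already handled, so do not consider it again.
        if (visited.count(n)) continue;
        visited.insert(n);
        std::printf("visiting node %d\n", n->id);
        for (Node* u : n->users) worklist.push(u);
      }
    }

    int main() {
      Node a{0, {}}, b{1, {}}, c{2, {}};
      a.users = {&b, &c};
      b.users = {&c};  // c is reachable twice but visited only once
      Walk(&a);
    }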
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
<< " is expected to be quantized with " << tensor_property.number_of_bits << " bits, but got " << num_storage_bits << " bits instead."; return failure(); } continue; // skip if it is already quantized. } quant::UniformQuantizedType qtype;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0)
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
                  QuantizationUnits& quantizable_ops) const {
  bool quantized = false;
  // TODO(b/212514817): refactor mode checking to improve code quality
  for (auto& quant_op : quantizable_ops) {
    if (quant_specs_.inference_type == tensorflow::DT_QINT8) {
      quantized |= quantizeOpAsInt8(rewriter, op, quant_op);
    } else if (quant_specs_.inference_type == tensorflow::DT_HALF) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0)
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_op_with_region.mlir
// RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-quantize -verify-each=false | FileCheck %s

// Tests if reduce_window op following quantized function is quantized.
module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 1722 : i32}, tf_saved_model.semantics} {
  // CHECK-LABEL: main_00
  // CHECK-SAME: %[[ARG0:.*]]: tensor<2x3x1x1024xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 20:32:46 UTC 2024 - 18.9K bytes - Viewed (0)
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc
              GetAsVector(expected_tensor->shape()));
}

// Finds the match of the quantized tensor from the possible tensors. Each
// possible tensor can be used only once. It checks shape and name if the
// tensor is quantized, and also checks buffer contents and tensor type if not
// quantized. For the quantized case, the tensor type and quantization params
// are expected to be checked in the test body with the match.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 32.3K bytes - Viewed (0)
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
                  QuantizationUnits& quantizable_ops) const {
  bool quantized = false;
  for (auto& quant_op : quantizable_ops) {
    if (quant_specs_.inference_type == tensorflow::DT_QINT8) {
      quantized |= quantizeOpAsInt8(rewriter, op, quant_op);
    }
  }
  return quantized;
}

 protected:
  QuantizationSpecs quant_specs_;
  OpSet op_set_;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0)
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc
// unquantized tensors are only inserted in the unquantized model
// whereas `DumpTensor` ops for the quantized tensors are only inserted
// in the quantized model. Both models are required to be able to dump
// both quantized and unquantized tensors and compare them offline.
if (quantization_options.has_debugger_config() &&
    quantization_options.debugger_config().debugger_type() ==
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 23.8K bytes - Viewed (0)