- Sort Score
- Result 10 results
- Languages All
Results 111 - 120 of 323 for quantized (0.21 sec)
-
tensorflow/compiler/mlir/lite/quantization/quantization_info.proto
message QuantizationInfo { // min/max of the per axis value range. To quantize the value, the metadata // of the target properties should be specified or read from the ops // quantization specification. message MinMax { float min = 1; float max = 2; } // Affine parameters to quantize the per axis value. The metadata of the // target properties should be specified as well.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 08 03:45:04 UTC 2019 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.td
"quant::QuantizationDialect"]; } def OptimizeIntGraph : Pass<"optimize-int-graph", "mlir::func::FuncOp"> { let summary = "Optimization patterns for quantized integer graph"; let description = [{ This includes patterns for merging addition of zp offset and bias. }]; let constructor = "mlir::quant::stablehlo::CreateOptimizeIntGraphPass()";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 23 01:41:18 UTC 2024 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/stablehlo_quantizer_odml_oss.ipynb
"metadata": { "id": "cqeGmbO6PPNd" }, "source": [ "This example shows a JAX Keras reference model converted into a StableHLO module via `jax2tf`, and then quantized in the ODML Converter via the StableHLO Quantizer.\n", "\n",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 12 03:40:43 UTC 2024 - 5.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/numerical_utils.h
// double_multiplier = int_multiplier * 2 ^ (-31 + exponent) // int_multiplier will be in the range of (2^31, 2^30]. QuantizedMultiplier QuantizeMultiplier(double double_multiplier); // Calculate the effective quantized value range for the scale, zero point. The // range is the minimum range defined by [rmin, rmax] and [qmin, qmax]. QuantizedRange CalculateQuantizedRange(double scale, int32_t zero_point,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 07 18:43:51 UTC 2022 - 1.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/utils/math_utils.cc
quantized_fraction = static_cast<int32_t>(std::round(fraction * (1L << 15))); // Clip extreme values. These are more than enough to overflow int8, the // storage type for quantized values, and the final values will be clamped // no matter what. if (quantized_fraction == (1L << 15)) { quantized_fraction /= 2; ++shift; } if (shift < -15) { shift = 0;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 08:32:43 UTC 2024 - 2.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/FakeQuantSupport.cc
nullptr); } // Special case where min/max is close enough. The tensor contents are all // 0.0s, so the scale is set to 1.0 and the tensor can be quantized to zero // points and dequantized to 0.0. if (std::fabs(rmax - rmin) < std::numeric_limits<double>::epsilon()) { return quant::UniformQuantizedType::getChecked( loc, flags, storageType, expressedType, 1.0, qmin, qmin, qmax);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 11:52:27 UTC 2024 - 7.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/report.h
namespace mlir::quant::stablehlo { // A class that manages information about `QuantizableUnit`s post-quantization, // internally in the form of `QuantizationUnits`. It is used to collect // quantization summary from a quantized `ModuleOp` and emit it in a human- and // machine-readable format. class QuantizationReport { public: QuantizationReport() = default; // Initializes `QuantizationReport` by collecting `QuantizationResults` from
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 10:10:34 UTC 2024 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/tf_quant_ops.h
#define GET_OP_CLASSES #include "tensorflow/compiler/mlir/quantization/tensorflow/passes/tf_quant_ops.h.inc" namespace mlir { namespace quant { // Function to register TensorFlow Uniform Quantized ops. void RegisterOps(); } // namespace quant } // namespace mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 29 08:20:46 UTC 2022 - 2.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/gen_quantized_function_library.py
# See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Generates the quantized function library contained header file.""" import ast import re import string from typing import Sequence from absl import app from absl import flags
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 20 01:38:06 UTC 2022 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/tests/rewrite_quantized_io.mlir
// RUN: tfr-opt %s -tfr-decompose -tfr-rewrite-quantized-io -verify-diagnostics | FileCheck %s // CHECK-LABEL: @tf__my_requantize tfr.func @tf__my_requantize(%input: !tfr.tensor) -> !tfr.tensor { %raw_data = tfr.quant_raw_data(%input) : (!tfr.tensor) -> !tfr.tensor %scale, %zp = tfr.quant_qparam(%input) : (!tfr.tensor) -> (!tfr.tensor, !tfr.tensor) %result = tfr.call @tf__requantize(%raw_data, %scale, %zp) : (!tfr.tensor, !tfr.tensor, !tfr.tensor) -> !tfr.tensor
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 2.3K bytes - Viewed (0)