Results 81 - 90 of 291 for Quantized (0.15 sec)
tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm.py
values greater than quant_max are converted to 2^num_bits - 1.

Args:
  quant_min: The minimum real value that can be represented by a quantized value.
  quant_max: The maximum real value that can be represented by a quantized value.

Returns:
  (error, quant_min, quant_max): Tuple of weighted mean squared error.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 11 19:29:56 UTC 2024 - 14.7K bytes
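For orientation, here is a minimal sketch of the kind of range-search error this docstring describes, assuming a NumPy histogram of observed values; the names (mse_for_range, hist, bin_centers) are illustrative, not the module's actual API:

    import numpy as np

    def mse_for_range(hist, bin_centers, quant_min, quant_max, num_bits=8):
        """Weighted MSE of quantizing the histogram to [quant_min, quant_max]."""
        levels = 2**num_bits - 1
        scale = (quant_max - quant_min) / levels
        # Values below quant_min clamp to 0; values above quant_max clamp
        # to 2^num_bits - 1, as described above.
        q = np.clip(np.round((bin_centers - quant_min) / scale), 0, levels)
        dequantized = q * scale + quant_min
        error = np.sum(hist * (bin_centers - dequantized) ** 2) / np.sum(hist)
        return error, quant_min, quant_max

A calibration algorithm can then minimize this error over candidate (quant_min, quant_max) pairs.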
tensorflow/compiler/mlir/quantization/tensorflow/passes/tf_quant_ops.td
limitations under the License.
==============================================================================*/

// This file contains operator definitions for TF uniform quantized ops.

include "tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td"
include "mlir/Interfaces/CallInterfaces.td"
include "mlir/Interfaces/InferTypeOpInterface.td"
include "mlir/IR/OpAsmInterface.td"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 01:09:50 UTC 2024 - 3.9K bytes
tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_xla.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-convert-fake-quant-to-qdq \
// RUN:   -quant-lift-quantizable-spots-as-functions='target-opset=XLA' \
// RUN:   -quant-insert-quantized-functions \
// RUN:   -quant-quantize-composite-functions='target-opset=XLA' \
// RUN:   -symbol-dce -inline -tf-shape-inference -canonicalize \
// RUN:   -quant-replace-cast-hacks-with-tf-xla-ops -cse -quant-optimize | FileCheck %s

module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 1219 : i32}, tf_saved_model.semantics} {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 7.2K bytes
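The first pass in that pipeline rewrites FakeQuant ops into explicit quantize/dequantize (QDQ) pairs. As a rough NumPy sketch of fake-quant semantics (min/max nudging omitted; an assumed helper, not TensorFlow's implementation):

    import numpy as np

    def fake_quant(x, min_val, max_val, num_bits=8):
        # Quantize to integer levels, then immediately dequantize -- numerically
        # equivalent to the explicit QDQ pair the pass inserts.
        levels = 2**num_bits - 1
        scale = (max_val - min_val) / levels
        q = np.clip(np.round((x - min_val) / scale), 0, levels)
        return q * scale + min_val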
tensorflow/compiler/mlir/quantization/tensorflow/ops/uniform_op_quant_spec.h
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// Functions for quantization specifications of Uniform Quantized ops.

#ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_OPS_UNIFORM_OP_QUANT_SPEC_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_OPS_UNIFORM_OP_QUANT_SPEC_H_

#include <memory>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 1.4K bytes
tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.h
namespace mlir::quant::stablehlo {

// Performs post-calibration graph transformation as part of post-training
// static-range quantization.
//
// The resulting `ModuleOp` contains quantized StableHLO ops serialized in
// `TF::XlaCallModuleOp`s. They are quantized using the statistics collected
// after the calibration step, corresponding to each `TF::CustomAggregatorOp`
// in the input module op.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 5.4K bytes
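To make "static-range quantization using collected statistics" concrete, a sketch of the two phases in plain Python (the min/max aggregation stands in for what the custom aggregator ops record; this is not the component's actual code):

    import numpy as np

    # Calibration phase: aggregate min/max over representative inputs.
    lo, hi = np.inf, -np.inf
    for batch in (np.random.randn(8, 16).astype(np.float32) for _ in range(10)):
        lo, hi = min(lo, float(batch.min())), max(hi, float(batch.max()))

    # Post-calibration phase: fix a static int8 affine mapping from the stats.
    scale = (hi - lo) / 255.0
    zero_point = int(round(-128 - lo / scale))

    def quantize(x):
        return np.clip(np.round(x / scale) + zero_point, -128, 127).astype(np.int8)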
tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td
"32-bit quantized integer">; def TF_Quint8 : AnyTypeOf< [TF_TensorFlowType<"Quint8", "quint8">, TF_Quint8Ref], "8-bit quantized unsigned integer">; def TF_Quint16 : AnyTypeOf< [TF_TensorFlowType<"Quint16", "quint16">, TF_Quint16Ref], "16-bit quantized unsigned integer">; // Any quantized type def TF_Quantized : AnyTypeOf< [TF_Qint8, TF_Qint16, TF_Qint32, TF_Quint8, TF_Quint16], "quantized">;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 30.5K bytes
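For reference, the storage ranges those quantized element types can represent are the usual signed/unsigned integer ranges (the dict below is illustrative, not part of the .td file):

    # Representable storage ranges for the quantized types above.
    QUANT_RANGES = {
        "qint8":   (-2**7,  2**7 - 1),   # -128 .. 127
        "qint16":  (-2**15, 2**15 - 1),
        "qint32":  (-2**31, 2**31 - 1),
        "quint8":  (0, 2**8 - 1),        # 0 .. 255
        "quint16": (0, 2**16 - 1),
    }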
tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions_weight_only.mlir
// RUN: tf-quant-opt %s -quant-insert-quantized-functions='quantization-method=weight_only target-opset=XLA' | FileCheck %s

// Empty module
module {
  func.func @simple_fn(%arg0: tensor<*xf32>) -> tensor<*xf32> {
    func.return %arg0 : tensor<*xf32>
  }
}

// CHECK-NOT: func private @internal_dequantize_f32
// CHECK-NOT: func private @internal_conv3d_fn
// CHECK-NOT: func private @internal_batch_matmul_fn
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 16 03:34:36 UTC 2023 - 843 bytes
tensorflow/compiler/mlir/lite/transforms/quantize.cc
static bool IsQuantizableCustomOp(Operation* op,
                                  const quant::CustomOpMap& custom_op_map) {
  // In some cases, ops may need to be quantized even though their op trait is
  // not quantizable. For example, in the custom-op case, various ops can be
  // categorized as custom ops even though each of them may require different
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 13.3K bytes
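A hypothetical Python analogue of that check, just to show the shape of the logic (the real function works on MLIR operations and a quant::CustomOpMap; the dict layout here is an assumption):

    def is_quantizable_custom_op(op_name: str, custom_op_map: dict) -> bool:
        # An op without a quantizable trait can still be quantized if the user
        # registered it in the custom-op map with quantizable input indices.
        spec = custom_op_map.get(op_name)
        return spec is not None and bool(spec.get("quantizable_input_indices"))

    # Example: mark the first input of a hypothetical custom op as quantizable.
    custom_op_map = {"MyCustomConv": {"quantizable_input_indices": [0]}}
    assert is_quantizable_custom_op("MyCustomConv", custom_op_map)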
tensorflow/compiler/mlir/lite/transforms/quantize_patterns.td
include "tensorflow/compiler/mlir/lite/ir/tfl_ops.td" // Quantize attribute $0 by using quantization parameter from %1. def QuantizeByQuantizedType : NativeCodeCall<"quant::Quantize($0, $1.getValue())">; def F32ElementsAttr : ElementsAttrBase< CPred<"$_self.cast<ElementsAttr>().getShapedType().getElementType().isF32()">, "float constant tensor">; // Squash tfl.dequantize and tfl.quantize pairs.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 23:10:13 UTC 2024 - 2.3K bytes
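The "squash" pattern removes a tfl.dequantize followed by a tfl.quantize with matching parameters, because the round trip is the identity on the stored integers. A small numeric sketch of why that is safe (assumed scale/zero-point values, plain NumPy):

    import numpy as np

    scale, zero_point = 0.5, 3  # assumed quantization parameters

    def dequantize(q):
        return (q.astype(np.float32) - zero_point) * scale

    def quantize(x):
        return np.clip(np.round(x / scale) + zero_point, -128, 127).astype(np.int8)

    q = np.array([-5, 0, 7], dtype=np.int8)
    # Requantizing with the *same* parameters reproduces the input exactly,
    # so the dequantize/quantize pair can be folded away.
    assert np.array_equal(quantize(dequantize(q)), q)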
tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc
clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"),
clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED",
           "Uses TF Uniform Quantized ops"))};

Option<QuantMethod> quantization_method_{
    *this, "quantization-method",
    llvm::cl::init(tensorflow::quantization::QuantizationMethod::
                       METHOD_STATIC_RANGE_INT8),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.4K bytes