Results 91 - 100 of 323 for quantized (0.29 sec)
tensorflow/compiler/mlir/quantization/tensorflow/passes/add_dump_tensor_op.cc
// unquantized_tensor_data.pb as it is used by unquantized dump model.
// After saving unquantized dump model, the file name will be changed to
// quantized_tensor_data.pb.
// Since this process doesn't happen for per layer, we need to set file_name
// as quantized_tensor_data.pb here.
// TODO: b/296933893 - Refactor the debugger code when no quantize option
// is added
std::string file_name =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 13K bytes
tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td
"32-bit quantized integer">; def TF_Quint8 : AnyTypeOf< [TF_TensorFlowType<"Quint8", "quint8">, TF_Quint8Ref], "8-bit quantized unsigned integer">; def TF_Quint16 : AnyTypeOf< [TF_TensorFlowType<"Quint16", "quint16">, TF_Quint16Ref], "16-bit quantized unsigned integer">; // Any quantized type def TF_Quantized : AnyTypeOf< [TF_Qint8, TF_Qint16, TF_Qint32, TF_Quint8, TF_Quint16], "quantized">;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 30.5K bytes
tensorflow/compiler/mlir/lite/transforms/quantize_patterns.td
include "tensorflow/compiler/mlir/lite/ir/tfl_ops.td" // Quantize attribute $0 by using quantization parameter from %1. def QuantizeByQuantizedType : NativeCodeCall<"quant::Quantize($0, $1.getValue())">; def F32ElementsAttr : ElementsAttrBase< CPred<"$_self.cast<ElementsAttr>().getShapedType().getElementType().isF32()">, "float constant tensor">; // Squash tfl.dequantize and tfl.quantize pairs.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 23:10:13 UTC 2024 - 2.3K bytes
tensorflow/compiler/mlir/lite/utils/const_tensor_utils.h
namespace mlir {
namespace TFL {

bool IsQuantized(const tflite::TensorT& tensor);

absl::StatusOr<mlir::quant::QuantizedType> GetQuantizedType(
    const tflite::TensorT& tensor, mlir::Builder builder,
    bool is_constant = false, mlir::Type storage_type = {});

// Imports float tensor with calibration value into calibrated quantized type.
absl::StatusOr<mlir::quant::QuantizedType> GetCalibratedQuantizedType(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 07 23:04:40 UTC 2024 - 2.9K bytes
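The two declared helpers imply a simple call pattern during flatbuffer import: check IsQuantized first, then resolve the MLIR quantized element type. A minimal sketch under that assumption, using only the signatures declared above (the wrapper function and error message are illustrative, not part of the header):

#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "mlir/IR/Builders.h"
#include "tensorflow/compiler/mlir/lite/utils/const_tensor_utils.h"

// Hypothetical import step: resolve the quantized element type for one
// flatbuffer tensor, treating it as a constant (weight) tensor.
absl::StatusOr<mlir::quant::QuantizedType> ResolveQuantizedType(
    const tflite::TensorT& tensor, mlir::Builder builder) {
  if (!mlir::TFL::IsQuantized(tensor)) {
    return absl::InvalidArgumentError("tensor has no quantization params");
  }
  // storage_type is left at its default ({}); is_constant defaults to false
  // in the declaration, so it is passed explicitly here.
  return mlir::TFL::GetQuantizedType(tensor, builder, /*is_constant=*/true);
}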
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
      != _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8
  ):
    raise ValueError(
        'StableHLO quantized opset currently only supports static range'
        ' quantization and weight-only quantization via TF Quantizer.'
    )

  # Set `force_graph_mode_calibration` to True to avoid skipping execution of
  # ops that are not connected to return ops during calibration.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes
tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions_weight_only.mlir
// RUN: tf-quant-opt %s -quant-insert-quantized-functions='quantization-method=weight_only target-opset=XLA' | FileCheck %s

// Empty module
module {
  func.func @simple_fn(%arg0: tensor<*xf32>) -> tensor<*xf32> {
    func.return %arg0 : tensor<*xf32>
  }
}

// CHECK-NOT: func private @internal_dequantize_f32
// CHECK-NOT: func private @internal_conv3d_fn
// CHECK-NOT: func private @internal_batch_matmul_fn
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 16 03:34:36 UTC 2023 - 843 bytes
tensorflow/compiler/mlir/lite/tf_tfl_translate_cl.cc
"tf-custom-opdefs", llvm::cl::desc("List of custom opdefs when importing " "graphdef")); // Quantize and Dequantize ops pair can be optionally emitted before and after // the quantized model as the adaptors to receive and produce floating point // type data with the quantized model. Set this to `false` if the model input is // integer types. // NOLINTNEXTLINE opt<bool> emit_quant_adaptor_ops(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 20:53:17 UTC 2024 - 7.9K bytes
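The excerpt cuts off at the option name. A sketch of how such an llvm::cl boolean option is typically completed; the flag spelling and default value below are assumptions, not the file's actual ones:

#include "llvm/Support/CommandLine.h"

using llvm::cl::opt;

// NOLINTNEXTLINE
opt<bool> emit_quant_adaptor_ops(
    "emit-quant-adaptor-ops",  // assumed flag spelling
    llvm::cl::desc("Emit Quantize/Dequantize adaptor ops so the quantized "
                   "model receives and produces float data"),
    llvm::cl::init(false));  // assumed default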
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h
// Stores information about how to quantize a user-specified custom operation.
// CustomOpInfo contains info of its corresponding CustomOp registered in the
// CustomOpMap. 'quantizable_input_indices' is used to determine which indices
// of the CustomOp are quantizable. 'is_weight_only' is used to specify whether
// the custom op is quantized only for storage and dequantized at runtime.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 4.2K bytes
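Per this comment, a caller describes each custom op before invoking weight quantization. A self-contained sketch of populating such a map; the struct and map shapes below mirror only the two documented fields and are assumptions, not the header's actual definitions:

#include <string>
#include <unordered_map>
#include <vector>

// Assumed shapes, mirroring only the fields documented above.
struct CustomOpInfo {
  std::vector<int> quantizable_input_indices;
  bool is_weight_only = false;
};
using CustomOpMap = std::unordered_map<std::string, CustomOpInfo>;

int main() {
  CustomOpMap custom_ops;
  // Only input index 1 (say, the weight operand) of "MyCustomConv" is
  // quantizable, and it is quantized for storage only: dequantized at runtime.
  custom_ops["MyCustomConv"] = {/*quantizable_input_indices=*/{1},
                                /*is_weight_only=*/true};
}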
tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
// The following two passes find specific uniform quantization patterns in
// StableHLO and convert them to TFLite ops that accept or produce uniform
// quantized types. They only target a specific set of models that contain
// "decomposed" quantized ops produced from the framework level. This is why
// they are placed right after the `LegalizeTFXlaCallModuleToStablehloPass`
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 25.5K bytes
tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc
        clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"),
        clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED",
                   "Uses TF Uniform Quantized ops"))};

  Option<QuantMethod> quantization_method_{
      *this, "quantization-method",
      llvm::cl::init(tensorflow::quantization::QuantizationMethod::
                         METHOD_STATIC_RANGE_INT8),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.4K bytes
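The excerpt shows the standard MLIR pass-option idiom: Option<> members initialized from *this with llvm::cl settings, and enum cases registered via clEnumValN. A stripped-down sketch of that idiom; the pass class and enum below are illustrative stand-ins, not the real ones:

#include "llvm/Support/CommandLine.h"
#include "mlir/Pass/Pass.h"

enum class OpSet { TF, XLA, UNIFORM_QUANTIZED };

// Illustrative pass exposing a single enumerated command-line option.
class ExamplePass
    : public mlir::PassWrapper<ExamplePass, mlir::OperationPass<>> {
 public:
  // Each Option<> member registers a flag scoped to this pass instance.
  Option<OpSet> op_set_{
      *this, "target-opset", llvm::cl::init(OpSet::TF),
      llvm::cl::desc("Choose target opset."),
      llvm::cl::values(
          clEnumValN(OpSet::TF, "TF", "Uses TF ops"),
          clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"),
          clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED",
                     "Uses TF Uniform Quantized ops"))};

  void runOnOperation() override {}
};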