- Sort by: Score
- Results per page: 10
- Languages: All
Results 161 - 170 of 203 for "dequantize" (0.3 sec)
-
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
#include "tensorflow/core/framework/types.pb.h" #include "tensorflow/lite/tools/optimize/operator_property.h" //===----------------------------------------------------------------------===// // The prepare-quantize Pass for LSTM. // namespace mlir { namespace TFL { constexpr double power_of_two_scale = 32768.0; // Same with the ordering of //tensorflow/compiler/mlir/lite/ir/tfl_ops.td
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_xla.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-convert-fake-quant-to-qdq -quant-lift-quantizable-spots-as-functions='target-opset=XLA' -quant-insert-quantized-functions -quant-quantize-composite-functions='target-opset=XLA' -symbol-dce -inline -tf-shape-inference -canonicalize -quant-replace-cast-hacks-with-tf-xla-ops -cse -quant-optimize | FileCheck %s module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 1219 : i32}, tf_saved_model.semantics} {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 7.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.h
const std::unordered_set<std::string> tags_; const absl::flat_hash_map<std::string, tensorflow::SignatureDef> signature_def_map_; // Signature keys to identify the functions to load & quantize. const std::vector<std::string> signature_keys_; }; // Runs passes to prepare the calibration model. absl::Status RunCalibrationPasses(mlir::ModuleOp module_op, MLIRContext& ctx,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 5.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/passes/decompose.cc
// The pass to decompose unregistered TF ops with the TFR compose function. // namespace mlir { namespace TFR { namespace { // Quantize the float value based on given scale and zero point attributes. IntegerAttr Quantize(float value, Attribute scale_attr, Attribute zp_attr, OpBuilder builder) { double scale = mlir::cast<FloatAttr>(scale_attr).getValueAsDouble();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc
pm.addPass(TFL::CreatePostQuantizeRemoveQDQPass()); if (failed(pm.run(module.get()))) { const std::string err(statusHandler.ConsumeStatus().message()); LOG(ERROR) << "Failed to quantize: " << err; return kTfLiteError; } // Export the results. tflite::FlatbufferExportOptions options; options.toco_flags.set_force_select_tf_ops(false);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
_QuantizationComponent.COMPONENT_ACTIVATION ].tensor_type ) # Unlike the HISTOGRAM_PERCENTILE method, the HISTOGRAM_MSE method uses # num_bits because it actually quantizes and dequantizes values. if activation_tensor_type != _TensorType.TENSORTYPE_INT_8: raise ValueError( 'Only TENSORTYPE_INT_8 is supported for HISTOGRAM_MSE calibration'
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_drq_per_channel.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-preprocess-op -quant-prepare-quantize-drq='enable-per-channel-quantization=true' | FileCheck %s module { func.func @matmul(%arg0: tensor<1x2x2x3xf32>) -> (tensor<*xf32>) { %cst_0 = "tf.Const"() {value = dense<0.000000e+00> : tensor<2x1024xf32>} : () -> tensor<2x1024xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 6.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.cc
if (dot_general_op == nullptr) return std::nullopt; const int64_t filter_rank = mlir::dyn_cast<ShapedType>(dot_general_op.getOperand(1).getType()) .getRank(); // To quantize rhs per-channel, we currently only consider the case where // `stablehlo.dot_general` is legalizable to `tfl.fully_connected`. const bool is_per_axis_quantizable = IsDotGeneralFullyConnected(dot_general_op).value();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_drq.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-preprocess-op -quant-prepare-quantize-drq | FileCheck %s module { func.func @matmul(%arg0: tensor<1x2x2x3xf32>) -> (tensor<*xf32>) { %cst_0 = "tf.Const"() {value = dense<0.000000e+00> : tensor<2x1024xf32>} : () -> tensor<2x1024xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 6.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize_per_channel.mlir
// RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-prepare-quantize=enable-per-channel-quantized-weight=true -verify-diagnostics | FileCheck %s // ----- module { // CHECK-LABEL: conv_with_bias_and_relu func.func private @conv_with_bias_and_relu(%arg0: tensor<1x3x2x3xf32>) -> tensor<1x2x2x2xf32> { %cst = "tf.Const"() {device = "", value = dense<[7.11401462, 7.05456924]> : tensor<2xf32>} : () -> tensor<2xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 26 07:48:15 UTC 2024 - 8.6K bytes - Viewed (0)