- Sort by: Score
- Results per page: 10
- Languages: All
Results 151 - 160 of 203 for dequantize (0.19 sec)
-
tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc
if (QuantizedType::getQuantizedElementType(operand.getType())) { auto newTy = QuantizedType::castToExpressedType(operand.getType()); newOperands.push_back( rewriter.create<TFL::DequantizeOp>(loc, newTy, operand)); continue; } newOperands.push_back(operand); } SmallVector<Type> newResultTys; for (auto result : op->getResults()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc
if (IsCallToQuantizableLiftedFunction(op)) { std::optional<StringRef> composite_function_name = GetCompsiteFunctionName(op); if (!composite_function_name.has_value()) return failure(); // Quantize inputs of quantizable composite functions. for (OpOperand &input : op->getOpOperands()) { Type element_type = getElementTypeOrSelf(input.get().getType()); // Non-float cases won't be calibrated.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 14.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_flow.mlir
// RUN: tf-quant-opt %s -quant-convert-fake-quant-to-qdq -quant-lift-quantizable-spots-as-functions -quant-insert-quantized-functions -quant-quantize-composite-functions -symbol-dce | FileCheck %s func.func @fake_quant_conv(%arg0: tensor<1x3x4x3xf32>, %arg1: tensor<2x3x3x2xf32>) -> tensor<*xf32> { %cst = "tf.Const"() {value = dense<0.000000e+00> : tensor<2xf32>} : () -> tensor<2xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 3.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_composite_functions.cc
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" // IWYU pragma: keep #define DEBUG_TYPE "quantize-composite-functions" namespace mlir::quant::stablehlo { #define GEN_PASS_DEF_QUANTIZECOMPOSITEFUNCTIONSPASS #include "tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h.inc" namespace {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 02:59:01 UTC 2024 - 4.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/tests/device-transform-nnapi.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 4.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/post_quantize.mlir
// RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-post-quantize | FileCheck %s // CHECK-LABEL: @remove_volatile_qdq func.func @remove_volatile_qdq() -> tensor<3x2xf32> { // CHECK: %[[CST:.*]] = stablehlo.constant // CHECK-NOT: "quantfork.qcast" // CHECK-NOT: "quantfork.dcast" // CHECK: return %[[CST]] %cst = stablehlo.constant dense<[[-0.960978984, -0.390246302], [-0.790828585, -0.601039409], [-1.0280807, -1.02731466]]> : tensor<3x2xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 20:32:46 UTC 2024 - 4.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/prepare_quantize.cc
auto func_op_quant_scale_spec = GetStableHloQuantConstraints; for (auto func_op : module_op.getOps<func::FuncOp>()) { // The function might contain more stats ops than required, and it will // introduce requantize if the calibration stats have conflicts. This tries // to remove all the redundant stats ops. RemoveRedundantStatsOps(func_op, func_op_quant_spec, func_op_quant_scale_spec);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 03 05:11:03 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto
// Defines various options to specify and control the behavior of the quantizer. // It consists of // 1) Model-wise quantization configuration as a default configuration. If it is // None, the default configuration is "do not quantize the model". // 2) A set of supported operations. // 3) Unit wise quantization precision. // 4) Target hardware name. // NEXT ID: 18 message QuantizationOptions {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 19 06:31:19 UTC 2024 - 9.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/FakeQuantSupport.cc
nullptr); } // Special case where min/max is close enough. The tensor contents are all // 0.0s, so the scale is set to 1.0 and the tensor can be quantized to zero // points and dequantized to 0.0. if (std::fabs(rmax - rmin) < std::numeric_limits<double>::epsilon()) { return quant::UniformQuantizedType::getChecked( loc, flags, storageType, expressedType, 1.0, qmin, qmin, qmax); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 11:52:27 UTC 2024 - 7.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_weight_only.mlir
// RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-quantize | FileCheck %s // Test that hybrid quantized dot_general is produced when q/dq pair only exists // for weight. module attributes {tf_saved_model.semantics} { func.func private @quantize_dot_general_fn(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> attributes {tf._original_func_name = "main_0"} { %cst = stablehlo.constant dense<3.000000e-01> : tensor<2x3xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 4.8K bytes - Viewed (0)