Results 11 - 20 of 87 for requantize (0.16 sec)
- tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize.mlir
// RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-quantize -verify-each=false | FileCheck %s
// Tests for PopulateFusedGemmStylePatterns are handled in
// quantize_composite_functions for module-level evaluation of functions.
module attributes {tf_saved_model.semantics} {
// CHECK: quantize_simple_xla_call_module(%[[ARG_0:.+]]: tensor<1x4xf32>)
func.func private @quantize_simple_xla_call_module(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> {
Last Modified: Thu Apr 18 01:38:40 UTC 2024 - 6.3K bytes
- tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/tf_to_quant.mlir
// CHECK: %[[CONSTANT:.*]] = "tf.Const"() <{value = dense<0.000000e+00> : tensor<8xf32>}>
// CHECK: %[[QUANTIZE:.*]] = "quantfork.qcast"(%[[CONSTANT]]) : (tensor<8xf32>) -> tensor<8x!quant.uniform<i8:f32, 1.000000e+00:-128>>
// CHECK: %[[DEQUANTIZE:.*]] = "quantfork.dcast"(%[[QUANTIZE]])
// CHECK: return %[[DEQUANTIZE]] : tensor<8xf32>
}
// CHECK-LABEL: fakeQuantNotFolded
Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 9.5K bytes
- tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/tf_to_quant_4bit.mlir
// CHECK: %[[CONSTANT:.*]] = "tf.Const"() <{value = dense<0.000000e+00> : tensor<8xf32>}>
// CHECK: %[[QUANTIZE:.*]] = "quantfork.qcast"(%[[CONSTANT]]) : (tensor<8xf32>) -> tensor<8x!quant.uniform<i4:f32, 1.000000e+00:-8>>
// CHECK: %[[DEQUANTIZE:.*]] = "quantfork.dcast"(%[[QUANTIZE]])
// CHECK: return %[[DEQUANTIZE]] : tensor<8xf32>
}
// CHECK-LABEL: fakeQuantNotFolded
Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 9.4K bytes
- tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h
// After applying the function, quantize and dequantize functions are created,
// where the body of each function contains a specific quantization algorithm.
// The quantize function takes one operand satisfying
// IsValueWithQuantizablePrecision and produces a tensor with a supported
// quantized precision (such as int8). The dequantize function is the other
// way around.
Last Modified: Sun Mar 24 07:44:40 UTC 2024 - 1.9K bytes
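The contract described in that header comment is ordinary affine quantization. As a self-contained plain-C++ illustration (a toy sketch of the contract, not the API actually declared in tf_quantize_op.h), quantize maps f32 into int8 via a scale and zero point, and dequantize inverts it:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Toy affine quantization: float in, supported quantized precision
    // (int8 here) out; dequantize is the other way around.
    int8_t Quantize(float x, float scale, int zero_point) {
      int q = static_cast<int>(std::lround(x / scale)) + zero_point;
      return static_cast<int8_t>(std::clamp(q, -128, 127));
    }

    float Dequantize(int8_t q, float scale, int zero_point) {
      return (static_cast<int>(q) - zero_point) * scale;
    }

    int main() {
      // scale = 1.0, zero_point = -128 matches the type in the test above:
      // !quant.uniform<i8:f32, 1.000000e+00:-128>
      int8_t q = Quantize(0.0f, /*scale=*/1.0f, /*zero_point=*/-128);
      std::printf("q=%d back=%f\n", q, Dequantize(q, 1.0f, -128));
      return 0;
    }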
- tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-lift-quantizable-spots-as-functions -quant-quantize -verify-each=false | FileCheck %s
func.func private @conv(%input: tensor<1x3x4x3xf32> {tf._user_specified_name = "input_tensor"}) -> tensor<*xf32> attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<1x3x4x3>]} {
  %weight = arith.constant dense_resource<__elided__> : tensor<2x3x3x2xf32>
Last Modified: Wed May 08 19:32:28 UTC 2024 - 6.4K bytes
- tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize.cc
patterns.add<StableHloQuantization, StableHloQuantizationReverse>(&ctx);
PopulateCommonQuantizationPatterns(ctx, patterns,
                                   enable_per_channel_quantized_weight_);
// Quantize all quantizable ops, including ops that are not compute-heavy.
PopulateAllQuantizablePatterns(ctx, patterns);
if (failed(applyPatternsAndFoldGreedily(module_op, std::move(patterns)))) {
Last Modified: Mon Apr 22 07:08:19 UTC 2024 - 5K bytes
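This snippet follows the standard MLIR greedy pattern-rewrite idiom: collect rewrites into a RewritePatternSet and hand them to the driver, which applies them until a fixed point. A minimal skeleton of that idiom (a generic sketch, not the actual StableHLO pass; MyQuantizePass and the commented-out pattern registration are placeholders):

    #include "mlir/IR/PatternMatch.h"
    #include "mlir/Pass/Pass.h"
    #include "mlir/Transforms/GreedyPatternRewriteDriver.h"

    using namespace mlir;

    // Skeleton pass showing the pattern-application idiom used above.
    struct MyQuantizePass
        : public PassWrapper<MyQuantizePass, OperationPass<ModuleOp>> {
      void runOnOperation() override {
        MLIRContext &ctx = getContext();
        RewritePatternSet patterns(&ctx);
        // patterns.add<SomeQuantizationPattern>(&ctx);  // register rewrites here
        // The driver applies the patterns repeatedly until nothing changes.
        if (failed(applyPatternsAndFoldGreedily(getOperation(),
                                                std::move(patterns)))) {
          signalPassFailure();
        }
      }
    };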
- tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.h
#include "mlir/IR/BuiltinOps.h" // from @llvm-project namespace mlir { namespace TFL { // Converts all the tfl.quantize/tfl.dequantize ops to the ops in the mlir.quant // dialect ones in the function. void ConvertTFLQuantOpsToMlirQuantOps(func::FuncOp func); // Converts all the mlir.quant dialect ops to the tfl.quantize/tfl.dequantize // ops in the function. void ConvertMlirQuantOpsToTFLQuantOps(func::FuncOp func);
Last Modified: Tue Apr 19 00:13:50 UTC 2022 - 2.5K bytes
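A typical use of this pair is to round-trip a function through the mlir.quant dialect so that rewrites written against mlir.quant ops can run in between. A minimal sketch, assuming only the two signatures declared above (ProcessFunc and the commented-out rewrite step are hypothetical):

    #include "tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.h"

    // Hypothetical driver: translate TFL quant ops into the mlir.quant
    // dialect, run rewrites that only understand mlir.quant, translate back.
    void ProcessFunc(mlir::func::FuncOp func) {
      mlir::TFL::ConvertTFLQuantOpsToMlirQuantOps(func);
      // RunQuantDialectRewrites(func);  // hypothetical in-between rewrites
      mlir::TFL::ConvertMlirQuantOpsToTFLQuantOps(func);
    }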
- tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/quantization.mlir
Last Modified: Thu May 02 09:41:17 UTC 2024 - 4.3K bytes
- tensorflow/compiler/mlir/lite/tests/optimize-after-quantization.mlir
// CHECK-LABEL: fuseMulIntoPerTensorConv2dWithQDQs
func.func @fuseMulIntoPerTensorConv2dWithQDQs(%arg0: tensor<256x32x32x3xf32>) -> tensor<256x8x7x3xf32> {
  %cst = arith.constant dense<1.5> : tensor<3xf32>
  %cst_0 = arith.constant dense<[1.0, 2.0, 3.0]> : tensor<3xf32>
  %w = arith.constant dense<2.0> : tensor<3x3x3x3xf32>
Last Modified: Fri Jan 05 18:35:42 UTC 2024 - 1.4K bytes
- tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc
}
TypeAttr type_attr = TypeAttr::get(new_type);
auto quantize = builder.create<TFL::QuantizeOp>(value.getLoc(), new_type,
                                                value, type_attr);
auto dequantize = builder.create<TFL::DequantizeOp>(
    value.getLoc(), expressed_type, quantize.getOutput());
value.replaceAllUsesWith(dequantize);
// `quantize` is using `dequantize` now, so we should set its operand to
// `value`.
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.4K bytes
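The trailing comment flags a classic replaceAllUsesWith pitfall: the call rewires every user of value, including the just-created quantize op, so quantize now consumes its own dequantized output. The snippet elides the actual fix; a sketch of one way to break the cycle using the generic MLIR operand API:

    // All users of `value`, including `quantize` itself, now read
    // `dequantize`, creating a cycle quantize -> dequantize -> quantize.
    value.replaceAllUsesWith(dequantize);
    // Restore `quantize`'s operand so it reads the original `value` again.
    quantize->replaceUsesOfWith(dequantize, value);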