- Sort: Score
- Results per page: 10
- Languages All
Results 101 - 110 of 193 for Quantile (0.14 sec)
-
tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.cc
converters.reserve(dim_size); for (int i = 0, e = dim_size; i != e; ++i) { converters.push_back(getPerChunkConverter(i)); } // Scan the elements of the dense elements attributes and quantize them by // using the right quantization parameters. int64_t flatten_index = 0; auto shape = type.getShape(); int64_t chunk_size = std::accumulate(std::next(shape.begin(), quantization_dim_ + 1),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized_drq.mlir
// PTQ // // Internal functions should be marked as private. They will be inlined and // deleted in `InsertQuantizedFunctionsPass`. // // For Uniform Quantized op case, attributes are generated during quantize // composite pass. Therefore, attr_map is set to an empty string. module { // Currently only 4-d case is supported func.func @quantized_conv2d_fn(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Dec 01 12:06:54 UTC 2022 - 3.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h
#include "tensorflow/core/framework/types.pb.h" #include "tensorflow/lite/tools/optimize/reduced_precision_support.h" namespace mlir { namespace quant { // Stores information about how to quantize a user-specified custom operation. struct CustomOpInfo { std::vector<std::int32_t> quantizable_input_indices; bool is_weight_only = false; bool no_side_effect = true; };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 10:16:19 UTC 2024 - 10.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc
if (IsCallToQuantizableLiftedFunction(op)) { std::optional<StringRef> composite_function_name = GetCompsiteFunctionName(op); if (!composite_function_name.has_value()) return failure(); // Quantize inputs of quantizable composite functions. for (OpOperand &input : op->getOpOperands()) { Type element_type = getElementTypeOrSelf(input.get().getType()); // Non-float cases won't be calibrated.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 14.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_composite_functions.cc
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" // IWYU pragma: keep #define DEBUG_TYPE "quantize-composite-functions" namespace mlir::quant::stablehlo { #define GEN_PASS_DEF_QUANTIZECOMPOSITEFUNCTIONSPASS #include "tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h.inc" namespace {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 02:59:01 UTC 2024 - 4.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/fake_quant_e2e_flow.mlir
// RUN: tf-quant-opt %s -quant-convert-fake-quant-to-qdq -quant-lift-quantizable-spots-as-functions -quant-insert-quantized-functions -quant-quantize-composite-functions -symbol-dce | FileCheck %s func.func @fake_quant_conv(%arg0: tensor<1x3x4x3xf32>, %arg1: tensor<2x3x3x2xf32>) -> tensor<*xf32> { %cst = "tf.Const"() {value = dense<0.000000e+00> : tensor<2xf32>} : () -> tensor<2xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 3.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir
// CHECK: return %[[RESULT]] } func.func @QDQsFollowedByTranspose(tensor<1x2xf32>) -> (tensor<2x1xf32>) { ^bb0(%arg0: tensor<1x2xf32>): %cst_0 = arith.constant dense<[1, 0]> : tensor<2xi32> %0 = "tfl.quantize"(%arg0){qtype = tensor<1x2x!quant.uniform<u8:f32, 1.0>>}: (tensor<1x2xf32>) -> (tensor<1x2x!quant.uniform<u8:f32, 1.0>>) %1 = "tfl.dequantize"(%0): (tensor<1x2x!quant.uniform<u8:f32, 1.0>>) -> (tensor<1x2xf32>)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 07:26:59 UTC 2024 - 59.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/quantization_context.cc
LLVM_DEBUG(llvm::dbgs() << "Quantization parameters are not collected in an ideal place. " "Has to fallback values which might introduce errors.\n"); // Use the first immutable state to quantize the rest operands and results. if (!immutable_states.empty()) return immutable_states.front()->params; // If there are no immutable states, use the operand's state if it is the
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 08 01:38:03 UTC 2024 - 13.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/tests/device-transform-nnapi.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 4.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/post_quantize.mlir
// RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-post-quantize | FileCheck %s // CHECK-LABEL: @remove_volatile_qdq func.func @remove_volatile_qdq() -> tensor<3x2xf32> { // CHECK: %[[CST:.*]] = stablehlo.constant // CHECK-NOT: "quantfork.qcast" // CHECK-NOT: "quantfork.dcast" // CHECK: return %[[CST]] %cst = stablehlo.constant dense<[[-0.960978984, -0.390246302], [-0.790828585, -0.601039409], [-1.0280807, -1.02731466]]> : tensor<3x2xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 20:32:46 UTC 2024 - 4.4K bytes - Viewed (0)