- Sort Score
- Result 10 results
- Languages All
Results 151 - 160 of 291 for Quantized (0.35 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py
) def _is_quantized_function(self, func: function_pb2.FunctionDef) -> bool: """Determine whether a FunctionDef is quantized. Args: func: A FunctionDef object. Returns: True iff `func` is quantized. """ return func.signature.name.startswith('quantized_') def _is_composite_function(self, func: function_pb2.FunctionDef) -> bool:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 08:51:46 UTC 2024 - 51.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 9.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc
} // TODO: b/264218457 - Refactor the component below once StableHLO Quantizer // can run DRQ. Temporarily using TF Quantization for StableHLO DRQ. if (!toco_flags.has_quantization_options()) { // The default minimum number of elements a weights array must have to be // quantized by this transformation. const int kWeightsMinNumElementsDefault = 1024;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 23.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h
assert(scales_.size() == zero_points_.size()); } // Quantize an Attribute by the quantization parameters. Return nullptr if // the conversion fails or the input array isn't an ElementsAttr. ElementsAttr convert(Attribute real_value); private: // Quantize a DenseFPElementsAttr by the quantization parameters. DenseElementsAttr convert(DenseFPElementsAttr attr);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 9.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_drq.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-insert-quantized-functions='quantization-method=drq target-opset=UNIFORM_QUANTIZED' -quant-quantize-composite-functions='quantization-method=drq target-opset=UNIFORM_QUANTIZED' -symbol-dce | FileCheck %s module { // TODO(b/260020937): Support transpose_a, transpose_b for matmul. func.func @matmul(%arg0: tensor<2x12xf32>) -> (tensor<*xf32>) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 05 18:35:42 UTC 2024 - 9.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py
"""Base test class for StableHLO quant tests.""" def setUp(self) -> None: super().setUp() # Many test cases for quantization involve creating and saving the input # model and saving the output quantized model. These two member # attributes can be used to specify the paths for such models, # respectively. These paths will be cleaned up after each test case.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 18.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_ptq.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-prepare-quantize='post-training-quantize=true' | FileCheck %s // ----- module { func.func @same_scale_ptq_test(%arg0: tensor<*xf32>) -> tensor<*xf32> { %cst = arith.constant dense<[-1, 144]> : tensor<2xi32> %cst_1 = arith.constant dense<1.0> : tensor<144x10xf32> %cst_2 = arith.constant dense<0.1> : tensor<10xf32> %0 = "quantfork.stats"(%arg0) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 01 10:21:29 UTC 2023 - 9.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_patterns.cc
quant::QuantizedType::getQuantizedElementType(input_type); auto output_quantized_type = quant::QuantizedType::getQuantizedElementType(output_type); // If both the input & output types are non-quantized, they will be both // nullptrs. if (input_quantized_type != output_quantized_type) { return failure(); } int64_t batch = input_type.getDimSize(0); int64_t height = input_type.getDimSize(1);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 25.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize.mlir
// RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-prepare-quantize=enable-per-channel-quantized-weight=false -verify-diagnostics | FileCheck %s // ----- // CHECK-LABEL: func @dot // CHECK-SAME: (%[[ARG_0:.*]]: tensor<?x3xf32>) -> tensor<?x2xf32> func.func @dot(%arg0: tensor<?x3xf32>) -> tensor<?x2xf32> { // CHECK: %[[cst:.*]] = stablehlo.constant // CHECK: %[[q1:.*]] = "quantfork.qcast"(%[[cst]])
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 19:52:06 UTC 2024 - 8.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.td
// Match convolution op with "NHWC" data format or matmul op. def SupportedAffineOpMatcher : NativeCodeCall< "MatchSupportedAffineOp($_self, $0, $1, $2)">; // Checks if a value can be symmetrically quantized. def CanBeSymmetricallyQuantized : Constraint<CPred<"CanBeSymmetricallyQuantized($0)">>; // Multiplies the value followed by a FakeQuant op and adjusts its params. def MultiplyFakeQuantValue : NativeCodeCall<
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 14 03:24:59 UTC 2024 - 8.4K bytes - Viewed (0)