- Sort Score
- Result 10 results
- Languages All
Results 21 - 30 of 79 for Quantile (0.14 sec)
-
tensorflow/compiler/mlir/lite/quantization/numerical_utils.cc
int32_t qmax) { auto quantize = [scale, zero_point](float f) { return zero_point + static_cast<int32_t>(std::round(f / scale)); }; if (rmin.has_value() && rmax.has_value()) { return {std::max(qmin, quantize(rmin.value())), std::min(qmax, quantize(rmax.value()))}; } else if (rmin.has_value()) { return {std::max(qmin, quantize(rmin.value())), qmax}; } else if (rmax.has_value()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 17 19:57:04 UTC 2023 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_drq.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-lift-quantizable-spots-as-functions -quant-prepare-quantize-drq -quant-quantize='weight-quantization=true' -verify-each=false | FileCheck %s // ----- module { func.func @matmul(%arg0: tensor<1x2x2x3xf32>) -> (tensor<*xf32>) { %cst_0 = "tf.Const"() {value = dense<0.000000e+00> : tensor<2x1024xf32>} : () -> tensor<2x1024xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:32:28 UTC 2024 - 1.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/quantization_info.proto
message QuantizationInfo { // min/max of the per axis value range. To quantize the value, the metadata // of the target properties should be specified or read from the ops // quantization specification. message MinMax { float min = 1; float max = 2; } // Affine parameters to quantize the per axis value. The metadata of the // target properties should be specified as well.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 08 03:45:04 UTC 2019 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc
} TypeAttr type_attr = TypeAttr::get(new_type); auto quantize = builder.create<TFL::QuantizeOp>(value.getLoc(), new_type, value, type_attr); auto dequantize = builder.create<TFL::DequantizeOp>( value.getLoc(), expressed_type, quantize.getOutput()); value.replaceAllUsesWith(dequantize); // `quantize` is using `dequantize` now, so we should set its operand to // `value`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.h
#include "mlir/IR/BuiltinOps.h" // from @llvm-project namespace mlir { namespace TFL { // Converts all the tfl.quantize/tfl.dequantize ops to the ops in the mlir.quant // dialect ones in the function. void ConvertTFLQuantOpsToMlirQuantOps(func::FuncOp func); // Converts all the mlir.quant dialect ops to the tfl.quantize/tfl.dequantize // ops in the function. void ConvertMlirQuantOpsToTFLQuantOps(func::FuncOp func);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 19 00:13:50 UTC 2022 - 2.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/quantization.mlir
// CHECK-LABEL: main func.func @main(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x401408xf32> { // CHECK: %{{.*}} = "tfl.quantize"(%{{.*}}) <{qtype = tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>}> : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/convert_type.h
// Returns element type from attribute Type 'type_attr'. mlir::Type GetShapeStrippedType(mlir::TypeAttr type_attr); // Returns true if 'val' is not from Quantize op or // from Quantize Op with same quant type as 'qtype_attr' bool NotFromQuantOpOrSameQuantType(mlir::Value val, mlir::TypeAttr qtype_attr); } // namespace tflite
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 2.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h
// Finally, use the quantization parameter to create the quantize and // dequantize ops, and insert them between the tf.FakeQuantWithMinMaxVarsOp // and its users. auto quantize = rewriter.create<quantfork::QuantizeCastOp>( tf_op.getLoc(), qtype.getValue(), input); auto dequantize = rewriter.create<quantfork::DequantizeCastOp>( tf_op.getLoc(), res_type, quantize.getResult());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/tf-tfl-translate-tf-quantize.mlir
A. Unique TensorFlower <******@****.***> 1713119208 -0700
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Apr 14 18:33:43 UTC 2024 - 1.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/converter_python_api.h
const tensorflow::quantization::PyFunctionLibrary* quantization_py_function_library = nullptr); // Quantize the model with calibration data. Throw errors if `fully_quantize` // is specified but the calibration data are not sufficient to quantize the // model. PyObject* MlirQuantizeModel(PyObject* data, bool disable_per_channel, bool fully_quantize, int inference_type,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 18:18:30 UTC 2024 - 3.6K bytes - Viewed (0)