Results 11 - 20 of 323 for quantized (0.15 sec)
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir
// CHECK: -------- Quantization Summary --------
// CHECK: Number of quantized layers in the model
// CHECK: --------------------------------
// CHECK: Name Count/Total
// CHECK: ================================
// CHECK: Conv2D 1/1
// CHECK: Number of quantized layers with quantized outputs: 0/1
// CHECK: Number of quantize layers added: 1
// CHECK: Number of dequantize layers added: 0
}

// -----
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 25.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc
            << quantized_per_axis_type << ".\n");
    return false;
  }
  return true;
}

// Determines whether the storage type of a quantized type is supported by
// `tfl.quantize` or `tfl.dequantize` ops. ui8, i8 and i16 are supported.
bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type) {
  if (storage_type.getWidth() == 8 ||
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes - Viewed (0) -
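The excerpt above is cut off mid-condition, but its comment fully specifies the behavior: a storage type is supported when it is 8 or 16 bits wide (ui8, i8, i16). A minimal standalone C++ sketch of that width check, without the mlir::IntegerType dependency of the real helper:

  // Sketch of the width check described by the comment above ("ui8, i8 and
  // i16 are supported"); the real function takes an mlir::IntegerType and may
  // do more than compare widths.
  bool IsSupportedStorageWidth(unsigned bit_width) {
    return bit_width == 8 || bit_width == 16;  // 8 covers i8/ui8, 16 covers i16
  }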
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h
  bool verify_numeric = false;

  // Whether to add verification for layer by layer, or on whole model. When
  // disabled (per-layer) float and quantized ops will be run from same input
  // (output of previous quantized layer). When enabled, float and quantized ops
  // will run with respective float and quantized output of previous ops.
  bool whole_model_verify = false;

  // Whether to use fake quant attributes to calculate quantization parameters.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 10:16:19 UTC 2024 - 10.8K bytes - Viewed (0) -
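To make the distinction between the two verification modes concrete, here is a hedged sketch (hypothetical names, not the actual pass logic) of which tensor feeds the current layer under each mode:

  // Hypothetical illustration of the flag semantics documented above.
  struct PrevLayerOutputs {
    float float_output;      // output of the previous float (reference) layer
    float quantized_output;  // dequantized output of the previous quantized layer
  };

  // Per-layer mode (whole_model_verify == false): both paths consume the
  // previous quantized output, so each layer's error is measured in isolation.
  // Whole-model mode: each path consumes its own previous output, so errors
  // accumulate across the model.
  float FloatPathInput(const PrevLayerOutputs& prev, bool whole_model_verify) {
    return whole_model_verify ? prev.float_output : prev.quantized_output;
  }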
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize.mlir
  return %7 : tensor<1x3xf32>
}

// Test that the inputs and output of the tf.XlaCallModule op has been replaced
// by quantized types, and the corresponding quantfork.dcast ops that turned
// those quantized types back to float types are removed.
// CHECK: %[[CONST_0:.+]] = stablehlo.constant dense<1.000000e+00> : tensor<4x3xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 01:38:40 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/numerical_utils.cc
}

// Calculates the quantized range for a given scale, zero point, minimum and
// maximum values, and quantization range.
//
// Args:
//   scale: The scale factor for the quantized values.
//   zero_point: The zero point for the quantized values.
//   rmin: The minimum value of the quantized values.
//   rmax: The maximum value of the quantized values.
//   qmin: The minimum value of the quantization range.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 17 19:57:04 UTC 2023 - 3.3K bytes - Viewed (0) -
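Only the doc comment is shown, so as an illustration the sketch below assumes the standard affine relation q = round(r / scale) + zero_point clamped to [qmin, qmax]; the actual function may differ in rounding or edge-case handling:

  #include <algorithm>
  #include <cmath>
  #include <cstdint>
  #include <utility>

  // Quantizes the real-valued range [rmin, rmax] into the integer range
  // [qmin, qmax] under the assumed affine mapping.
  std::pair<int32_t, int32_t> QuantizedRangeSketch(double scale, int32_t zero_point,
                                                   double rmin, double rmax,
                                                   int32_t qmin, int32_t qmax) {
    auto quantize = [&](double r) {
      int32_t q = static_cast<int32_t>(std::round(r / scale)) + zero_point;
      return std::clamp(q, qmin, qmax);
    };
    return {quantize(rmin), quantize(rmax)};
  }

  // Example: scale = 0.5, zero_point = 0, rmin = -10, rmax = 10,
  // qmin = -128, qmax = 127 gives the quantized range [-20, 20].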
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h
namespace mlir::quant::stablehlo {

// Checks whether an op is connected with a quantized composite function. If
// not, the same-scale op will not be quantized. This decision is based on the
// current assumption that the performance gain of the same-scale op itself
// could not beat the overhead of the quantize and dequantize routines need to
// be added around that op. When the assumption changes, this policy might
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.9K bytes - Viewed (0) -
tensorflow/c/tf_datatype.h
  TF_INT64 = 9,
  TF_BOOL = 10,
  TF_QINT8 = 11,     // Quantized int8
  TF_QUINT8 = 12,    // Quantized uint8
  TF_QINT32 = 13,    // Quantized int32
  TF_BFLOAT16 = 14,  // Float32 truncated to 16 bits.
  TF_QINT16 = 15,    // Quantized int16
  TF_QUINT16 = 16,   // Quantized uint16
  TF_UINT16 = 17,
  TF_COMPLEX128 = 18,  // Double-precision complex
  TF_HALF = 19,
  TF_RESOURCE = 20,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Sep 08 20:13:32 UTC 2023 - 2.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.td
  * A tensor is dequantized using a `func::FuncOp` whose name contains
    "uniform_dequantize". The first argument is the tensor to be quantized,
    the second argument is the zero point constant (element type: int) and
    the third argument is the inverse scale constant (element type: float).
  * Inputs to the target quantized op is quantized and the outputs are
    dequantized.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 21:59:06 UTC 2024 - 5.6K bytes - Viewed (0) -
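Reading "inverse scale" as 1/scale, the elementwise math implied by that dequantize pattern would look roughly like the following (an assumption, since the excerpt does not show how the constant is applied):

  #include <cstdint>

  // Assumed affine dequantization r = (q - zero_point) * scale, expressed with
  // the inverse scale constant the pattern description mentions.
  float DequantizeElement(int32_t q, int32_t zero_point, float inverse_scale) {
    return static_cast<float>(q - zero_point) / inverse_scale;
  }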
tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc
// * Input tensors are per-tensor uniform quantized (i8->f32)
//   tensors (full integer) with shape [..., r_x, c_x] or [..., c_x, r_x].
// * The filter tensor is a per-tensor uniform quantized (i8->f32) tensor
//   (constant or activation) with shape [..., r_y, c_y] or [..., c_y, r_y].
// * Output tensors are per-tensor uniform quantized (i8->f32) or
//   per-channel uniform quantized (i32->f32) tensors.
//
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 09:00:19 UTC 2024 - 99.8K bytes - Viewed (0)
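The per-tensor vs. per-channel distinction in those requirements can be illustrated with a hedged sketch (hypothetical helpers, not part of the pass): per-tensor quantization shares one (scale, zero_point) pair across the whole tensor, while per-channel quantization keeps one pair per output channel.

  #include <cstdint>
  #include <vector>

  // Per-tensor: a single scale/zero_point for the whole (i8) tensor.
  float DequantizePerTensor(int8_t q, float scale, int32_t zero_point) {
    return static_cast<float>(static_cast<int32_t>(q) - zero_point) * scale;
  }

  // Per-channel: one scale/zero_point per output channel (here for the i32
  // accumulator tensors mentioned above).
  float DequantizePerChannel(int32_t q, int channel,
                             const std::vector<float>& scales,
                             const std::vector<int32_t>& zero_points) {
    return static_cast<float>(q - zero_points[channel]) * scales[channel];
  }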