- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 306 for Quantized (0.81 sec)
-
tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir
// RUN: odml-to-stablehlo-opt --compose-uniform-quantized-type \ // RUN: --split-input-file --verify-diagnostics %s | FileCheck %s module { // CHECK-LABEL: quantized_conv_op // CHECK-SAME: %[[ARG:.*]]: tensor<1x3x3x4xf32> func.func @quantized_conv_op(%arg0: tensor<1x3x3x4xf32>) -> tensor<1x3x3x4xf32> { %1 = stablehlo.constant dense<1.000000e+03> : tensor<1x1x1x1xf32> // Input inverse scale.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 37K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir
// RUN: odml-to-stablehlo-opt --uniform-quantized-stablehlo-to-tfl \ // RUN: --split-input-file --verify-diagnostics %s | FileCheck %s // ============================================================================ // The following functions test example quantization patterns outputted from // JAX Quantizer. JAX Quantizer should output integer types, which are // composed into `UniformQuantized{|PerAxis}Type` via // `compose_uniform_quantized_type_pass.cc`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 106.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc
!IsConnectedWithCompsiteFunction(quantizing_op)) { continue; } // Same scale op is not supported for Uniform Quantized ops. if (target_opset_ == OpSet::UNIFORM_QUANTIZED) { continue; } // Collect all the quantized inputs and "clone" the matched op by these // inputs. SmallVector<Value, 4> inputs; inputs.reserve(quantizing_op->getNumOperands());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 23.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.proto
// If the selected quantization option is not available, StableHLO quantizer // will raise an error. // NEXT ID: 2 message CustomQuantizationMethod { // Specify component name, bit width, and other specs for all components // intended to be quantized. repeated QuantizationComponentSpec quantization_component_spec = 1; } // Quantization spec per each component designated to be quantized.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 22 02:20:05 UTC 2023 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td
string name = n; string asTraitArgsStr = !interleave(params, ", ") # !if(signed, ", true", ", false"); } // Uniform quantized types. Two integers "smantissa" and "sexp" are used to // express the Mantissa and Exponent components of the floating-point scale so // the scale of the quantized type is "smantissa * 10 ^ sexp". class UInt8UniformQuantizedType<int zero_pt, int smantissa, int sexp> : QuantizedType<"Uniform",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 8.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h
// Returns true iff `type` is a uniform quantized type whose storage type is // 32-bit integer and expressed type is f32. bool IsI32F32UniformQuantizedType(Type type); // Returns true iff `type` is a uniform quantized per-axis (per-channel) type // whose storage type is 32-bit integer and expressed type is f32. bool IsI32F32UniformQuantizedPerAxisType(Type type); // Determines whether the storage type of a quantized type is supported by
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td
} def QuantizeCompositeFunctionsPass : Pass<"stablehlo-quantize-composite-functions", "ModuleOp"> { let summary = "Quantize composite functions with QDQ input / outputs."; let options = [ Option<"enable_per_channel_quantized_weight_", "enable-per-channel-quantized-weight", "bool", /*default=*/"true", "Whether to enable per-channel quantized weights.">, Option<"mlir_dump_file_name_", "mlir-dump-file-name",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 10.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
// // 1. Replaces quantized `TF::XlaCallModuleOp` with a `func::CallOp`. // 2. Quantizes the callee function. // // The inputs of this pattern assumes an invalid IR, where even if a // `TF::XlaCallModuleOp` is quantized the callee remains unquantized. Step (2) // not only replaces the input and output tensor types into quantized ones, but // also rewrites the body with a quantized equivalent. //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
}; // Converts quantized tensor type with signed integer type to quantized tensor // type with unsigned integer type. Type ConvertSignedQuantizedToUnsigned(Type signed_tensor_type, Location loc); // Converts quantize ops with unsigned quantized types to these with signed // quantized types and preserves the scales. template <typename QuantizeOpT>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir
// CHECK: -------- Quantization Summary -------- // CHECK: Number of quantized layers in the model // CHECK: -------------------------------- // CHECK: Name Count/Total // CHECK: ================================ // CHECK: Conv2D 1/1 // CHECK: Number of quantized layers with quantized outputs: 0/1 // CHECK: Number of quantize layers added: 1 // CHECK: Number of dequantize layers added: 0 } // -----
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 25.2K bytes - Viewed (0)