- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 76 for Quantized (0.11 sec)
-
tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir
// RUN: odml-to-stablehlo-opt --compose-uniform-quantized-type \ // RUN: --split-input-file --verify-diagnostics %s | FileCheck %s module { // CHECK-LABEL: quantized_conv_op // CHECK-SAME: %[[ARG:.*]]: tensor<1x3x3x4xf32> func.func @quantized_conv_op(%arg0: tensor<1x3x3x4xf32>) -> tensor<1x3x3x4xf32> { %1 = stablehlo.constant dense<1.000000e+03> : tensor<1x1x1x1xf32> // Input inverse scale.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 37K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td
} def QuantizeCompositeFunctionsPass : Pass<"stablehlo-quantize-composite-functions", "ModuleOp"> { let summary = "Quantize composite functions with QDQ input / outputs."; let options = [ Option<"enable_per_channel_quantized_weight_", "enable-per-channel-quantized-weight", "bool", /*default=*/"true", "Whether to enable per-channel quantized weights.">, Option<"mlir_dump_file_name_", "mlir-dump-file-name",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 10.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
// // 1. Replaces quantized `TF::XlaCallModuleOp` with a `func::CallOp`. // 2. Quantizes the callee function. // // The inputs of this pattern assume an invalid IR, where even if a // `TF::XlaCallModuleOp` is quantized the callee remains unquantized. Step (2) // not only replaces the input and output tensor types with quantized ones, but // also rewrites the body with a quantized equivalent. //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
}; // Converts quantized tensor type with signed integer type to quantized tensor // type with unsigned integer type. Type ConvertSignedQuantizedToUnsigned(Type signed_tensor_type, Location loc); // Converts quantize ops with unsigned quantized types to those with signed // quantized types and preserves the scales. template <typename QuantizeOpT>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
// previous quantized layer (Please note that this part is different from // DEBUGGER_TYPE_FLOAT_PER_LAYER). Each layer in the debugging model // has a DumpTensor, and it is used to save the entire value of outputs from // both the quantized and unquantized layer. DEBUGGER_TYPE_INT_PER_LAYER = 2; // DEBUGGER_TYPE_FLOAT_PER_LAYER creates a debugging model with both
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 14.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h
namespace mlir::quant::stablehlo { // Checks whether an op is connected with a quantized composite function. If // not, the same-scale op will not be quantized. This decision is based on the // current assumption that the performance gain of the same-scale op itself // could not beat the overhead of the quantize and dequantize routines that need // to be added around that op. When the assumption changes, this policy might
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/report_test.cc
// The quantized call op without the _quantization_method attribute is not // captured as a `QuantizationResult`. ASSERT_THAT(results.results(), IsEmpty()); } TEST_F(QuantizationReportTest, InitializeWithModuleOpWithInvalidCalleeName) { // A quantized dot_general op but the callee function has an invalid name. It // is expected to start with `quantized_`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 10:10:34 UTC 2024 - 18.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
std::optional<const absl::string_view> mlir_dump_file_prefix = std::nullopt); // Converts dequantize-(quantizable) call-quantize pattern to a single call op // that has quantized input and output types. It is expected for this pass to // emit illegal IR with unsupported quantized input and output types. The // pass following immediately after this one will be responsible for legalizing
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0)