- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 108 for Quantized (0.26 sec)
-
tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir
// RUN: odml-to-stablehlo-opt --compose-uniform-quantized-type \ // RUN: --split-input-file --verify-diagnostics %s | FileCheck %s module { // CHECK-LABEL: quantized_conv_op // CHECK-SAME: %[[ARG:.*]]: tensor<1x3x3x4xf32> func.func @quantized_conv_op(%arg0: tensor<1x3x3x4xf32>) -> tensor<1x3x3x4xf32> { %1 = stablehlo.constant dense<1.000000e+03> : tensor<1x1x1x1xf32> // Input inverse scale.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 37K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc
!IsConnectedWithCompsiteFunction(quantizing_op)) { continue; } // Same scale op is not supported for Uniform Quantized ops. if (target_opset_ == OpSet::UNIFORM_QUANTIZED) { continue; } // Collect all the quantized inputs and "clone" the matched op by these // inputs. SmallVector<Value, 4> inputs; inputs.reserve(quantizing_op->getNumOperands());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 23.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td
} def QuantizeCompositeFunctionsPass : Pass<"stablehlo-quantize-composite-functions", "ModuleOp"> { let summary = "Quantize composite functions with QDQ input / outputs."; let options = [ Option<"enable_per_channel_quantized_weight_", "enable-per-channel-quantized-weight", "bool", /*default=*/"true", "Whether to enable per-channel quantized weights.">, Option<"mlir_dump_file_name_", "mlir-dump-file-name",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 10.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
// // 1. Replaces quantized `TF::XlaCallModuleOp` with a `func::CallOp`. // 2. Quantizes the callee function. // // The inputs of this pattern assumes an invalid IR, where even if a // `TF::XlaCallModuleOp` is quantized the callee remains unquantized. Step (2) // not only replaces the input and output tensor types into quantized ones, but // also rewrites the body with a quantized equivalent. //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
}; // Converts quantized tensor type with signed integer type to quantized tensor // type with unsigned integer type. Type ConvertSignedQuantizedToUnsigned(Type signed_tensor_type, Location loc); // Converts quantize ops with unsigned quantized types to these with signed // quantized types and preserves the scales. template <typename QuantizeOpT>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir
// CHECK: -------- Quantization Summary -------- // CHECK: Number of quantized layers in the model // CHECK: -------------------------------- // CHECK: Name Count/Total // CHECK: ================================ // CHECK: Conv2D 1/1 // CHECK: Number of quantized layers with quantized outputs: 0/1 // CHECK: Number of quantize layers added: 1 // CHECK: Number of dequantize layers added: 0 } // -----
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 25.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h
bool verify_numeric = false; // Whether to add verification for layer by layer, or on whole model. When // disabled (per-layer) float and quantized ops will be run from same input // (output of previous quantized layer). When enabled, float and quantized ops // will run with respective float and quantized output of previous ops. bool whole_model_verify = false; // Whether to use fake quant attributes to calculate quantization parameters.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 10:16:19 UTC 2024 - 10.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/QuantOps.td
// quantized representation may be acceptable. // // Especially early in transformation, it is common to have pairs of // qcast/dcast at points where a transition to a quantized type is // required. In addition, it is also common to have an identity qcast // (where the operand and result type are not quantized) at all points where // it is legal to use a quantized representation (but is not known to be
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 09 03:10:59 UTC 2024 - 10.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
// previous quantized layer (Please note that this part is different part // from DEBUGGER_TYPE_FLOAT_PER_LAYER). Each layer in the debugging model // has a DumpTensor, and it is used to save the entire value of outputs from // both the quantized and unquantized layer. DEBUGGER_TYPE_INT_PER_LAYER = 2; // DEBUGGER_TYPE_FLOAT_PER_LAYER creates a debugging model with both
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 14.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions.mlir
// CHECK: -------- Quantization Summary -------- // CHECK: Number of quantized layers in the model // CHECK: -------------------------------- // CHECK: Name Count/Total // CHECK: ================================ // CHECK: Conv2D 1/2 // CHECK: Number of quantized layers with quantized outputs: 1/1 // CHECK: Number of quantize layers added: 1 // CHECK: Number of dequantize layers added: 1 } // -----
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Nov 06 01:23:21 UTC 2023 - 15.2K bytes - Viewed (0)