- Sort by: Score
- Results per page: 10
- Languages All
Results 11 - 20 of 149 for Quantized (0.23 sec)
-
tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.td
def ComposeUniformQuantizedTypePass : Pass<"compose-uniform-quantized-type", "ModuleOp"> { let summary = "Compose uniform quantized types in StableHLO."; let constructor = "mlir::odml::CreateComposeUniformQuantizedTypePass()"; let description = [{ Identifies uniform quantization patterns and composes them to uniform quantized types. This pass targets a specific set of models that are quantized from the framework level, which produces "decomposed"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 21:59:06 UTC 2024 - 5.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/components/post_calibration_component.mlir
// RUN: stablehlo-quant-opt %s -stablehlo-test-post-calibration-component='unpack-quantized-types=false' \ // RUN: -split-input-file | FileCheck %s --check-prefix=CHECK-NO-UNPACK // Tests that a simple dot_general (lifted as a function) with CustomAggregators // around it is quantized. The resulting graph has quantized types unpacked into // int ops. func.func @main(%arg0: tensor<1x1024xf32>) -> tensor<1x3xf32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 01:09:50 UTC 2024 - 6.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/ir/ConvertConst.cc
/// quantized and the operand type is quantizable. LogicalResult QuantizedConstRewrite::matchAndRewrite( QuantizeCastOp qbarrier, PatternRewriter &rewriter) const { Attribute value; // Is the operand a constant? if (!matchPattern(qbarrier.getArg(), m_Constant(&value))) { return failure(); } // Does the qbarrier convert to a quantized type. This will not be true
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc
// (e.g. matmul) has both quantized and unquantized inputs by dequantizing // the quantized inputs, performing the operation in the expressed type, then // requantizing if a quantized output is required. // // The motivation behind these changes is for Dialects that assume only float // or quantized computation, and do not support a mixture of these types on
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h
// and NumericVerify ops to compare output values from the quantized and float // ops. // // When `legacy_float_scale` is true, the quantizer will use float scale instead // of double, and call TOCO's quantization routines to maintain bit-exactness of // the values with the TOCO quantizer. TfLiteStatus QuantizeModel( absl::string_view model_buffer, const tflite::TensorType &input_type,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_quantized_functions.cc
// For consistency, we require all quantized composite function to have // the "tf_quant.quantized_ops" attribute. if (!new_func.getSymName().starts_with("quantized_")) continue; if (!new_func->hasAttrOfType<ArrayAttr>("tf_quant.quantized_ops")) { new_func->emitError() << "Missing \"tf_quant.quantized_ops\" " "attribute in the quantized composite function."; signalPassFailure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 8.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.h
#include "mlir/Pass/Pass.h" // from @llvm-project namespace mlir::quant::stablehlo { // Creates an instance of the ConvertTFQuantOpsToMHLOPass pass, which will // convert TF uniform quantized ops to the corresponding quantized MHLO ops. std::unique_ptr<OperationPass<func::FuncOp>> CreateConvertTFQuantOpsToMHLOPass(); // TODO(b/288094093): Migrate uniform quantization legalization in a separate // pass.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 23 01:41:18 UTC 2024 - 2.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions_drq.mlir
// RUN: tf-quant-opt %s -quant-insert-quantized-functions='quantization-method=drq' | FileCheck %s // RUN: tf-quant-opt %s -quant-insert-quantized-functions='quantization-method=drq target-opset=UNIFORM_QUANTIZED' | FileCheck --check-prefix=UQ-CHECK %s // Empty module module { func.func @simple_fn(%arg0: tensor<*xf32>) -> tensor<*xf32> { func.return %arg0 : tensor<*xf32> } } // CHECK-NOT: func private @internal_calculate_quant_params
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Dec 01 12:06:54 UTC 2022 - 1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.h
// identifies the `MetaGraphDef`. `quantization_config` determines the behavior // of StableHLO Quantizer. `quantization_py_function_lib` contains python // implementations of certain APIs that are required for calibration. // `module_op` is the input graph to be quantized and it should contain // StableHLO ops. // // Returns a quantized `ModuleOp` in StableHLO, potentially wrapped inside a
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 19 02:44:03 UTC 2024 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/passes/rewrite_quantized_io.cc
class RewriteQuantizedIOPass : public PassWrapper<RewriteQuantizedIOPass, OperationPass<ModuleOp>> { public: StringRef getArgument() const final { return "tfr-rewrite-quantized-io"; } StringRef getDescription() const final { return "Replaces operands and results that has quantized type with their " "storage types."; } void runOnOperation() override; }; void RewriteQuantizedIOPass::runOnOperation() {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 08 01:19:25 UTC 2023 - 4.5K bytes - Viewed (0)