Results 1 - 10 of 74 for Quantized (0.18 sec)

  1. tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h

    // Returns true iff `type` is a uniform quantized type whose storage type is
    // 32-bit integer and expressed type is f32.
    bool IsI32F32UniformQuantizedType(Type type);
    
    // Returns true iff `type` is a uniform quantized per-axis (per-channel) type
    // whose storage type is 32-bit integer and expressed type is f32.
    bool IsI32F32UniformQuantizedPerAxisType(Type type);
    
    // Determines whether the storage type of a quantized type is supported by
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.7K bytes
    - Viewed (0)
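    A minimal sketch, using MLIR quant dialect APIs, of what a predicate like IsI32F32UniformQuantizedType checks; the function name, include path, and cast style here are assumptions, not the TensorFlow implementation:

    #include "mlir/Dialect/Quant/QuantTypes.h"  // header path varies across MLIR versions

    // Sketch only: true iff `type` is a uniform quantized type whose storage
    // type is a 32-bit integer and whose expressed type is f32.
    bool IsI32F32UniformQuantizedTypeSketch(mlir::Type type) {
      auto qtype = mlir::dyn_cast<mlir::quant::UniformQuantizedType>(type);
      return qtype && qtype.getStorageType().isInteger(32) &&
             qtype.getExpressedType().isF32();
    }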
  2. tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc

                                << quantized_per_axis_type << ".\n");
        return false;
      }
    
      return true;
    }
    
    // Determines whether the storage type of a quantized type is supported by
    // `tfl.quantize` or `tfl.dequantize` ops. ui8, i8 and i16 are supported.
    bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type) {
      if (storage_type.getWidth() == 8 ||
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.4K bytes
    - Viewed (0)
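    The excerpt above is cut off mid-condition; a hedged sketch of what the comment implies the full check amounts to (the name is illustrative, and the treatment of signed vs. unsigned 16-bit is an assumption):

    // Per the comment above, ui8, i8 and i16 storage types are supported, so
    // the predicate reduces to a width (and, for 16-bit, signedness) test.
    bool IsSupportedByTfliteQuantizeOrDequantizeOpsSketch(
        mlir::IntegerType storage_type) {
      const unsigned width = storage_type.getWidth();
      return width == 8 || (width == 16 && !storage_type.isUnsigned());
    }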
  3. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize.mlir

        return %7 : tensor<1x3xf32>
      }
    // Test that the inputs and output of the tf.XlaCallModule op have been replaced
    // by quantized types, and the corresponding quantfork.dcast ops that turned
    // those quantized types back to float types are removed.
    // CHECK: %[[CONST_0:.+]] = stablehlo.constant dense<1.000000e+00> : tensor<4x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 01:38:40 UTC 2024
    - 6.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_composite_functions.cc

    };
    
    void QuantizeCompositeFunctionsPass::runOnOperation() {
      MLIRContext& ctx = getContext();
    
      PassManager pm(&ctx);
      // Intermediate output from QuantizePass will have quantized ops
      // (XlaCallModuleOps) with quantized input and output types, which are not
      // allowed in the TF dialect.
      pm.enableVerifier(false);
    
      PrepareQuantizePassOptions options;
      options.enable_per_channel_quantized_weight_ =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 02:59:01 UTC 2024
    - 4.6K bytes
    - Viewed (0)
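    A minimal sketch of the nested-pipeline pattern this excerpt uses, assuming an OperationPass<ModuleOp> subclass; the pass being added is a placeholder:

    void QuantizePassSketch::runOnOperation() {
      mlir::MLIRContext& ctx = getContext();
      mlir::PassManager pm(&ctx);
      // The intermediate IR carries quantized types the TF dialect rejects,
      // so inter-pass verification is disabled, as in the excerpt.
      pm.enableVerifier(false);
      pm.addPass(CreateSomeQuantizePass());  // placeholder constructor
      if (mlir::failed(runPipeline(pm, getOperation())))
        signalPassFailure();
    }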
  5. tensorflow/compiler/mlir/quantization/stablehlo/cc/report.cc

        return result;
      } else {
        return std::nullopt;
      }
    }
    
    // Populates quantized ops from `module_op` to `results`. After going through
    // the quantization passes, quantized ops are represented as `func::CallOp` with
    // a callee's prefix of `quantized_`.
    void PopulateQuantizedResults(ModuleOp module_op,
                                  QuantizationResults& results) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.8K bytes
    - Viewed (0)
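    A hedged sketch of the walk that comment describes, with the result container simplified to a vector (the real code populates a QuantizationResults structure):

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include <vector>

    // Collect every call to a lifted function whose callee name carries the
    // `quantized_` prefix, per the comment above.
    void CollectQuantizedCallsSketch(mlir::ModuleOp module_op,
                                     std::vector<mlir::func::CallOp>& results) {
      module_op.walk([&](mlir::func::CallOp call_op) {
        if (call_op.getCallee().starts_with("quantized_"))
          results.push_back(call_op);
      });
    }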
  6. tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.td

    def ComposeUniformQuantizedTypePass : Pass<"compose-uniform-quantized-type", "ModuleOp"> {
      let summary = "Compose uniform quantized types in StableHLO.";
      let constructor = "mlir::odml::CreateComposeUniformQuantizedTypePass()";
      let description = [{
        Identifies uniform quantization patterns and composes them to uniform
        quantized types. This pass targets a specific set of models that are
        quantized from the framework level, which produces "decomposed"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 21:59:06 UTC 2024
    - 5.6K bytes
    - Viewed (0)
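    Given the constructor declared in this .td entry, a minimal sketch of wiring the pass into a pipeline; everything besides the constructor name is illustrative setup:

    mlir::PassManager pm(&context);
    // Constructor name taken from the pass definition above.
    pm.addPass(mlir::odml::CreateComposeUniformQuantizedTypePass());
    if (mlir::failed(pm.run(module)))
      llvm::errs() << "compose-uniform-quantized-type failed\n";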
  7. tensorflow/compiler/mlir/quantization/stablehlo/tests/components/post_calibration_component.mlir

    // RUN: stablehlo-quant-opt %s -stablehlo-test-post-calibration-component='unpack-quantized-types=false' \
    // RUN:   -split-input-file | FileCheck %s --check-prefix=CHECK-NO-UNPACK
    
    // Tests that a simple dot_general (lifted as a function) with CustomAggregators
    // around it is quantized. The resulting graph has quantized types unpacked into
    // int ops.
    func.func @main(%arg0: tensor<1x1024xf32>) -> tensor<1x3xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 6.7K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/quantization/ir/ConvertConst.cc

    /// quantized and the operand type is quantizable.
    
    LogicalResult QuantizedConstRewrite::matchAndRewrite(
        QuantizeCastOp qbarrier, PatternRewriter &rewriter) const {
      Attribute value;
    
      // Is the operand a constant?
      if (!matchPattern(qbarrier.getArg(), m_Constant(&value))) {
        return failure();
      }
    
      // Does the qbarrier convert to a quantized type? This will not be true
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 4.4K bytes
    - Viewed (0)
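    A compact, hedged restatement of the two checks visible in this excerpt, assuming `input` and `result_type` stand for the cast op's operand and result type:

    // (1) Is the operand produced by a constant op? Capture its attribute.
    mlir::Attribute value;
    if (!mlir::matchPattern(input, mlir::m_Constant(&value)))
      return mlir::failure();
    // (2) Does the cast actually produce a quantized element type?
    if (!mlir::dyn_cast<mlir::quant::QuantizedType>(
            mlir::getElementTypeOrSelf(result_type)))
      return mlir::failure();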
  9. tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc

    // (e.g. matmul) has both quantized and unquantized inputs by dequantizing
    // the quantized inputs, performing the operation in the expressed type, then
    // requantizing if a quantized output is required.
    //
    // The motivation behind these changes is for Dialects that assume only float
    // or quantized computation, and do not support a mixture of these types on
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.8K bytes
    - Viewed (0)
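    A pseudocode-level sketch of the decomposition that comment describes; CreateDequantize is a hypothetical helper standing in for the dialect's dequantize op, and only the dequantize -> float compute -> requantize shape comes from the text:

    llvm::SmallVector<mlir::Value> float_operands;
    for (mlir::Value operand : op->getOperands()) {
      auto element_type = mlir::getElementTypeOrSelf(operand.getType());
      if (auto qtype = mlir::dyn_cast<mlir::quant::QuantizedType>(element_type))
        // Hypothetical helper: emit the dialect's dequantize op so the
        // computation can run in the expressed (float) type.
        operand = CreateDequantize(rewriter, operand, qtype.getExpressedType());
      float_operands.push_back(operand);
    }
    // ...perform the op on float_operands, then requantize the result only
    // if a quantized output is required.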
  10. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h

    // and NumericVerify ops to compare output values from the quantized and float
    // ops.
    //
    // When `legacy_float_scale` is true, the quantizer will use float scale instead
    // of double, and call TOCO's quantization routines to maintain bit-exactness of
    // the values with the TOCO quantizer.
    TfLiteStatus QuantizeModel(
        absl::string_view model_buffer, const tflite::TensorType &input_type,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 2.8K bytes
    - Viewed (0)
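    The legacy_float_scale behavior noted above can be illustrated with a toy computation; this is not the TFLite quantizer's code, just the float-vs-double rounding distinction the comment describes:

    // Illustrative only: emulate the legacy (TOCO) behavior by rounding the
    // scale through float; otherwise keep full double precision.
    double ComputeScaleSketch(double rmin, double rmax, bool legacy_float_scale) {
      const double scale = (rmax - rmin) / 255.0;  // 8-bit example range
      return legacy_float_scale
                 ? static_cast<double>(static_cast<float>(scale))
                 : scale;
    }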