Results 11 - 14 of 14 for Motivation (0.18 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto

      QuantizationSpecs specs = 4;
    
      // Configures the quantization debugger.
      DebuggerConfig debugger_config = 5;
    
      // Defines calibration options for quantization. This option is only used for
      // activation of static range quantization (SRQ). Quantization calibration
      // method is set to MIN_MAX by default.
      CalibrationOptions calibration_options = 6;
    
      // Path to file to save the quantization report, which is essentially a
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 14.3K bytes
    - Viewed (0)
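    A minimal sketch of how this config might be assembled from Python, assuming the generated proto bindings are importable at the path below; the module path and the CalibrationOptions field/enum names are assumptions, since the snippet only shows the `specs`, `debugger_config`, and `calibration_options` fields of QuantizationConfig.

        # Hypothetical import path for the bindings generated from
        # quantization_config.proto; adjust to wherever the pb2 module lives.
        from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2 as qc_pb2

        config = qc_pb2.QuantizationConfig()
        # The snippet says the calibration method defaults to MIN_MAX for static
        # range quantization (SRQ); the field and enum names here are assumed.
        config.calibration_options.calibration_method = (
            qc_pb2.CalibrationOptions.CALIBRATION_METHOD_MIN_MAX
        )
        print(config)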
  2. tensorflow/compiler/mlir/tensorflow/tests/fused_kernel_matcher.mlir

    // RUN: tf-opt %s -tf-fused-kernel-matcher | FileCheck %s
    
    //===----------------------------------------------------------------------===//
    // Conv2D + BiasAdd + <Activation> fusions.
    //===----------------------------------------------------------------------===//
    
    // CHECK-LABEL: conv2DBiasAdd_noActivation
    func.func @conv2DBiasAdd_noActivation(%arg0: tensor<128xf32>, %arg1: tensor<1x1x3x128xf32>, %arg2: tensor<8x32x32x3xf32>) -> (tensor<*xf32>) {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.2K bytes
    - Viewed (0)
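    The test above feeds a bias of shape 128, a 1x1x3x128 filter, and an 8x32x32x3 input. A minimal sketch of the corresponding unfused pattern at the TensorFlow Python level follows; that this exact graph is what -tf-fused-kernel-matcher rewrites into a single fused op is an assumption based on the test name.

        import tensorflow as tf

        @tf.function
        def conv2d_bias_add(x, filters, bias):
            # Conv2D followed by BiasAdd, with no activation, mirroring the
            # conv2DBiasAdd_noActivation test case.
            y = tf.nn.conv2d(x, filters, strides=[1, 1, 1, 1], padding="SAME")
            return tf.nn.bias_add(y, bias)

        x = tf.random.normal([8, 32, 32, 3])        # NHWC input
        filters = tf.random.normal([1, 1, 3, 128])  # HWIO filter
        bias = tf.random.normal([128])
        print(conv2d_bias_add(x, filters, bias).shape)  # (8, 32, 32, 128)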
  3. tensorflow/compiler/mlir/lite/transforms/passes.td

          Option<"quantize_signed_", "quantize-signed", "bool", "false",
                 "signed inference type. Only used in tests">,
          Option<"activation_number_of_bits_", "activation-number-of-bits", "int", "8",
                 "number of bits for inference type. Only used in tests">,
          Option<"post_training_quantize_", "post-training-quantize", "bool", "false",
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 22.6K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir

    // RUN: tf-opt %s -tfl-prepare-quantize="quantize-signed=true post-training-quantize=true activation-number-of-bits=16" -cse | FileCheck %s
    
    // CHECK-LABEL: QuantizeUnidirectionalLstmFullPerTensor
    func.func @QuantizeUnidirectionalLstmFullPerTensor(%arg0: tensor<1x2x3xf32>) -> (tensor<1x2x3xf32>) {
      %input = "quantfork.stats"(%arg0) {layerStats = dense<[0.0, 1.0]> : tensor<2xf32>} : (tensor<1x2x3xf32>) -> tensor<1x2x3xf32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 26.1K bytes
    - Viewed (0)
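    The RUN line above drives the pass with quantize-signed=true, post-training-quantize=true, and activation-number-of-bits=16. A minimal sketch of what is assumed to be the user-facing counterpart, the TFLite converter's 16-bit-activation / 8-bit-weight post-training mode, follows; the mapping to this pass, the saved-model path, and the representative dataset are assumptions.

        import tensorflow as tf

        def representative_dataset():
            # Calibration inputs; the 1x2x3 shape follows the test's %arg0.
            for _ in range(100):
                yield [tf.random.normal([1, 2, 3])]

        converter = tf.lite.TFLiteConverter.from_saved_model("path/to/saved_model")
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
        ]
        tflite_model = converter.convert()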