Results 21 - 30 of 200 for requantize (0.17 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto

      // hardware performs better with integer ops.
      // Default value: true
      optional bool unpack_quantized_types = 1;
    
      // When set to True, requantize op in the quantized fusion will merge with the
      // subsequent dequantize op if present.
      // Default value: false
      // TODO: b/321729008 - re-consider default value after testing on prod model.
      bool merge_fusion_with_dequantize = 2;
    }
    
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 14.3K bytes
  2. tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc

      }
    };
    
    // stablehlo.uniform_dequantize -> tfl.dequantize
    class RewriteUniformDequantizeOp
        : public OpRewritePattern<stablehlo::UniformDequantizeOp> {
      using OpRewritePattern<stablehlo::UniformDequantizeOp>::OpRewritePattern;
    
      // Determines whether the input and output types are compatible with
      // `tfl.dequantize`. See the definition for the `DEQUANTIZE` kernel for the
      // detailed limitations
    - Last Modified: Mon Apr 22 09:00:19 UTC 2024
    - 99.8K bytes
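    Note: a minimal before/after sketch of the rewrite this pattern performs
    (function name and tensor types are assumed for illustration, not taken
    from the source file):

      // Before: StableHLO dequantization of a uniform-quantized tensor.
      func.func @dequant(%arg0: tensor<2x!quant.uniform<i8:f32, 1.0>>) -> tensor<2xf32> {
        %0 = stablehlo.uniform_dequantize %arg0 : (tensor<2x!quant.uniform<i8:f32, 1.0>>) -> tensor<2xf32>
        func.return %0 : tensor<2xf32>
      }

      // After RewriteUniformDequantizeOp: the same dequantization expressed
      // with the TFLite op, provided the types pass the compatibility check
      // mentioned in the comment above.
      func.func @dequant(%arg0: tensor<2x!quant.uniform<i8:f32, 1.0>>) -> tensor<2xf32> {
        %0 = "tfl.dequantize"(%arg0) : (tensor<2x!quant.uniform<i8:f32, 1.0>>) -> tensor<2xf32>
        func.return %0 : tensor<2xf32>
      }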
  3. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

    // MixedPrecision-NEXT: %[[q:.*]] = "tfl.quantize"(%arg0)
    // MixedPrecision-NEXT: %[[dq:.*]] = "tfl.dequantize"(%[[q]])
    // MixedPrecision-NEXT: %[[q_0:.*]] = "tfl.quantize"(%arg1)
    // MixedPrecision-NEXT: %[[dq_0:.*]] = "tfl.dequantize"(%[[q_0]])
    // MixedPrecision-NEXT: %[[c:.*]] = "tfl.concatenation"(%[[dq]], %[[dq_0]])
    // MixedPrecision-NEXT: %[[q_1:.*]] = "tfl.quantize"(%[[c]])
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
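    Note: those CHECK lines describe the "fake quant" sandwich that
    -tfl-prepare-quantize inserts around float ops. A hypothetical float input
    that would be rewritten into that pattern (shapes and attributes assumed):

      func.func @concat(%arg0: tensor<2xf32>, %arg1: tensor<2xf32>) -> tensor<4xf32> {
        // Preparation wraps each operand and the result in a
        // tfl.quantize/tfl.dequantize pair; the later -tfl-quantize pass
        // then fuses the pairs into a fully quantized concatenation.
        %0 = "tfl.concatenation"(%arg0, %arg1) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<2xf32>, tensor<2xf32>) -> tensor<4xf32>
        func.return %0 : tensor<4xf32>
      }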
  4. tensorflow/compiler/mlir/lite/tests/quantize.mlir

    // RUN: tf-opt %s -tfl-prepare-quantize -tfl-quantize  | FileCheck %s
    // RUN: tf-opt %s -tfl-quantize="legacy-quantize=true" | FileCheck --check-prefix=LEGACY %s
    // RUN: tf-opt %s -tfl-prepare-quantize -tfl-quantize="ops-blocklist=tfl.fully_connected,tfl.softmax locs-blocklist=Block,NullBlock" | FileCheck --check-prefix=BLOCK %s
    
    // CHECK-LABEL: QuantizeFloatConst
    func.func @QuantizeFloatConst() -> tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>> {
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 39.7K bytes
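    Note: a plausible completion of the truncated test body above (the
    constant value is assumed): -tfl-quantize folds a float constant and its
    tfl.quantize user into a single quantized constant.

      func.func @QuantizeFloatConst() -> tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>> {
        %0 = arith.constant dense<-0.1> : tensor<2x2xf32>
        %1 = "tfl.quantize"(%0) {qtype = tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>} : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>
        // After the pass, %0 and %1 fold into one quantized constant
        // ("tfl.pseudo_qconst").
        func.return %1 : tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>
      }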
  5. tensorflow/compiler/mlir/lite/tests/quantize-variables.mlir

    // RUN: tf-opt %s -tfl-quantize-variables | FileCheck %s
    // RUN: tf-opt %s -tfl-prepare-quantize -tfl-quantize -tfl-post-quantize -tfl-quantize-variables -tfl-quantize -tfl-post-quantize | FileCheck --check-prefix=WHOLE-PASSES %s
    
    // CHECK-LABEL: QuantizeReadVariable
    func.func @QuantizeReadVariable() -> (tensor<1x2x1x3x!quant.uniform<i8:f32, 1.0>>) {
      %1 = "tfl.var_handle"() : () -> tensor<!tf_type.resource>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.3K bytes
  6. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

    // CHECK: %[[QUANTIZE:.*]] = "tfl.quantize"(%[[CST]]) <{qtype = tensor<1x32x42x128x!quant.uniform<i8<-127:127>:f32:0, {1.000000e+00}>>}> {volatile}
    // CHECK: %[[DEQUANTIZE:.*]] = "tfl.dequantize"(%[[QUANTIZE]]) : (tensor<1x32x42x128x!quant.uniform<i8<-127:127>:f32:0, {1.000000e+00}>>) -> tensor<1x32x42x128xf32>
    // CHECK: "tfl.transpose_conv"(%arg1, %[[DEQUANTIZE]], %arg0,
    
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
  7. tensorflow/compiler/mlir/lite/tests/post-quantize.mlir

      %2 = "tfl.dequantize"(%1#0) : (tensor<2x!quant.uniform<u8:f32, 1.0>>) -> tensor<2xf32>
      %3 = "tfl.dequantize"(%1#1) : (tensor<2x!quant.uniform<u8:f32, 1.0>>) -> tensor<2xf32>
    
      // unused quantization ops should be removed as well.
      %4 = "tfl.dequantize"(%1#2) : (tensor<2x!quant.uniform<u8:f32, 1.0>>) -> tensor<2xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 19.9K bytes
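    Note: a minimal sketch of the cleanup the snippet's comment refers to
    (function name assumed; types taken from the snippet): -tfl-post-quantize
    erases quantization ops whose results have no users.

      func.func @removeDeadDequantize(%arg0: tensor<2x!quant.uniform<u8:f32, 1.0>>) -> tensor<2x!quant.uniform<u8:f32, 1.0>> {
        // Dead: %0 is never consumed, so post-quantize deletes this op.
        %0 = "tfl.dequantize"(%arg0) : (tensor<2x!quant.uniform<u8:f32, 1.0>>) -> tensor<2xf32>
        func.return %arg0 : tensor<2x!quant.uniform<u8:f32, 1.0>>
      }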
  8. tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir

    // RUN: tf-opt %s -tfl-prepare-quantize="post-training-quantize=true" -tfl-quantize="numeric-verify=true log-if-failed=true" | FileCheck --check-prefix=DEBUG %s
    // RUN: tf-opt %s -tfl-prepare-quantize="post-training-quantize=true" -tfl-quantize="numeric-verify=true log-if-failed=true whole-model-verify=true" | FileCheck --check-prefix=MODEL-DEBUG %s
    
    // DEBUG-LABEL: QuantizeConv2D
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.1K bytes
  9. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir

    // CHECK-DAG: %[[input_11:.*]] = "tfl.dequantize"({{.*}}) : (tensor<3x!quant.uniform<i32:f32, 4.8060645560249487E-8>>) -> tensor<3xf32>
    // CHECK-DAG: %[[input_12:.*]] = "tfl.dequantize"({{.*}}) : (tensor<3x!quant.uniform<i32:f32, 7.2090970130772759E-8>>) -> tensor<3xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 26.1K bytes
  10. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range.mlir

    // RUN: tf-opt %s -tfl-prepare-quantize-dynamic-range -tfl-quantize="enable-dynamic-range-quantization=true" | FileCheck %s
    // RUN: tf-opt %s -tfl-prepare-quantize-dynamic-range -tfl-quantize="enable-dynamic-range-quantization=true enable-weight-only-quantization=true" | FileCheck --check-prefix=PerChannelWeightOnly %s
    - Last Modified: Thu May 23 21:09:00 UTC 2024
    - 23.2K bytes