Results 1 - 10 of 41 for requantize (0.23 sec)

  1. tensorflow/compiler/mlir/lite/transforms/quantize_patterns.td

    include "tensorflow/compiler/mlir/lite/ir/tfl_ops.td"
    
    // Quantize attribute $0 by using quantization parameter from %1.
    def QuantizeByQuantizedType : NativeCodeCall<"quant::Quantize($0, $1.getValue())">;
    def F32ElementsAttr : ElementsAttrBase<
      CPred<"$_self.cast<ElementsAttr>().getShapedType().getElementType().isF32()">, "float constant tensor">;
    
    // Squash tfl.dequantize and tfl.quantize pairs.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 2.3K bytes
    - Viewed (0)
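    For context (not part of the indexed file): a minimal MLIR sketch of the dequantize/quantize pair that a squash pattern like the one above targets. The function name, tensor shape, and quantization parameters are illustrative, not taken from the file.

        // Hypothetical IR: a value is dequantized and immediately re-quantized
        // to the same (illustrative) quantized type, so the pair can be folded
        // away and uses of %1 replaced directly with %arg0.
        func.func @squash_dq_q(%arg0: tensor<4x!quant.uniform<u8:f32, 1.0>>) -> tensor<4x!quant.uniform<u8:f32, 1.0>> {
          %0 = "tfl.dequantize"(%arg0) : (tensor<4x!quant.uniform<u8:f32, 1.0>>) -> tensor<4xf32>
          %1 = "tfl.quantize"(%0) {qtype = tensor<4x!quant.uniform<u8:f32, 1.0>>} : (tensor<4xf32>) -> tensor<4x!quant.uniform<u8:f32, 1.0>>
          return %1 : tensor<4x!quant.uniform<u8:f32, 1.0>>
        }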
  2. tensorflow/compiler/mlir/quantization/stablehlo/passes/merge_fusion_with_dequantize.cc

        auto func_op =
            dyn_cast_or_null<func::FuncOp>(symbol_table.lookup(func_name));
        if (!func_op) return failure();
        // The quantized fusion should have requantize and return ops at the end.
        auto return_op = dyn_cast_or_null<func::ReturnOp>(
            func_op.getRegion().getBlocks().front().getTerminator());
        if (!return_op) return failure();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.9K bytes
    - Viewed (0)
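    For context (not part of the indexed file): a minimal MLIR sketch, in generic op form, of the function shape this check looks for, i.e. a quantized fusion whose block ends with a requantize feeding the return op. The function name and quantized types are illustrative, and a real fusion would contain the fused compute op before the requantize.

        // Hypothetical quantized fusion: the body ends with a requantize
        // (stablehlo.uniform_quantize) followed by the return.
        func.func private @quantized_fusion_sketch(%arg0: tensor<1x3x!quant.uniform<i8:f32, 1.0>>) -> tensor<1x3x!quant.uniform<i8:f32, 2.0>> {
          // Trailing requantize: rescales the fusion result to the output type
          // (types here are made up for illustration).
          %0 = "stablehlo.uniform_quantize"(%arg0) : (tensor<1x3x!quant.uniform<i8:f32, 1.0>>) -> tensor<1x3x!quant.uniform<i8:f32, 2.0>>
          return %0 : tensor<1x3x!quant.uniform<i8:f32, 2.0>>
        }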
  3. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range-float16.mlir

      // CHECK: %[[DQ_1:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
      // CHECK: %[[DQ_2:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
      // CHECK: %[[DQ_3:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
      // CHECK: %[[DQ_4:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
      // CHECK: %[[DQ_5:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.6K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/utils/fake_quant_utils.h

        // dequantize ops, and insert them between the tf.FakeQuantWithMinMaxVarsOp
        // and its users.
        Value value = tf_op.getOutputs();
        auto quantize = rewriter.create<TFL::QuantizeOp>(
            tf_op.getLoc(), qtype.getValue(), value, qtype);
        auto dequantize = rewriter.create<TFL::DequantizeOp>(
            tf_op.getLoc(), res_type, quantize.getOutput());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.6K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/default_quant_params.mlir

        %0 = "tfl.dequantize"(%arg0) : (tensor<1x224x224x3x!quant.uniform<u8:f32, 1.0>>) -> tensor<1x224x224x3xf32>
        %1 = "tfl.dequantize"(%arg1) : (tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 1.0>>) -> tensor<32x3x3x3xf32>
        %2 = "tfl.dequantize"(%arg2) : (tensor<32x!quant.uniform<i32:f32, 1.0>>) -> tensor<32xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 8.8K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/quantization/tensorflow/tf_to_quant.cc

      }
    };
    
    // Inserts a "tfl.quantize" and "tfl.dequantize" op pair (QDQs) after the
    // "tf.FakeQuantWithMinMaxVarsOp" to be constant folded. Since the constant
    // folding logic will use a "arith.constant" op to replace the
    // "tf.FakeQuantWithMinMaxVarsOp", the "tfl.quantize" op is used to preserve
    // the quantization parameters as a TypeAttr and "tfl.dequantize" op used to
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.1K bytes
    - Viewed (0)
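    For context (not part of the indexed file): a minimal MLIR sketch of the quantize/dequantize (QDQ) pair inserted after a "tf.FakeQuantWithMinMaxVars" op, as described in the comment above. The shape and the quantized type are illustrative rather than the exact parameters the pass would derive from the min/max constants.

        func.func @fake_quant_to_qdq(%arg0: tensor<8xf32>) -> tensor<8xf32> {
          %min = arith.constant dense<-1.0> : tensor<f32>
          %max = arith.constant dense<1.0> : tensor<f32>
          // The fake-quant op that will later be constant folded away.
          %fq = "tf.FakeQuantWithMinMaxVars"(%arg0, %min, %max) {num_bits = 8 : i64, narrow_range = false} : (tensor<8xf32>, tensor<f32>, tensor<f32>) -> tensor<8xf32>
          // Inserted QDQ pair: the quantize op preserves the quantization
          // parameters in its result type (illustrative here), and the
          // dequantize restores f32 for downstream users.
          %q = "tfl.quantize"(%fq) {qtype = tensor<8x!quant.uniform<u8:f32, 1.0>>} : (tensor<8xf32>) -> tensor<8x!quant.uniform<u8:f32, 1.0>>
          %dq = "tfl.dequantize"(%q) : (tensor<8x!quant.uniform<u8:f32, 1.0>>) -> tensor<8xf32>
          return %dq : tensor<8xf32>
        }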
  7. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize.mlir

    // RUN: stablehlo-quant-opt %s -split-input-file -stablehlo-quantize -verify-each=false | FileCheck %s
    
    // Tests for PopulateFusedGemmStylePatterns are handled in
    // quantize_composite_functions for module-level evaluation of functions.
    
    module attributes {tf_saved_model.semantics} {
    // CHECK: quantize_simple_xla_call_module(%[[ARG_0:.+]]: tensor<1x4xf32>)
      func.func private @quantize_simple_xla_call_module(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 01:38:40 UTC 2024
    - 6.3K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-lift-quantizable-spots-as-functions -quant-quantize -verify-each=false | FileCheck %s
    
    func.func private @conv(%input: tensor<1x3x4x3xf32> {tf._user_specified_name = "input_tensor"}) -> tensor<*xf32> attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<1x3x4x3>]} {
      %weight = arith.constant dense_resource<__elided__> : tensor<2x3x3x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 19:32:28 UTC 2024
    - 6.4K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize.cc

      patterns.add<StableHloQuantization, StableHloQuantizationReverse>(&ctx);
    
      PopulateCommonQuantizationPatterns(ctx, patterns,
                                         enable_per_channel_quantized_weight_);
    
      // Quantize all quantizable ops, including ops that are not compute-heavy.
      PopulateAllQuantizablePatterns(ctx, patterns);
    
      if (failed(applyPatternsAndFoldGreedily(module_op, std::move(patterns)))) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 22 07:08:19 UTC 2024
    - 5K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/quantization.mlir

    // CHECK:   %{{.*}} = "tfl.dequantize"(%{{.*}}) : (tensor<1x401408x!quant.uniform<u8:f32, 3.906250e-03>>) -> tensor<1x401408xf32>
    
      %cst = arith.constant dense<[1, 401408]> : tensor<2xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.3K bytes
    - Viewed (0)