Results 11 - 20 of 131 for "dequantize" (0.29 sec)

  1. tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir

    // CHECK-DAG: %[[q_w1:.*]] = "tfl.quantize"(%[[w]]) <{qtype = tensor<64x3x3x3x!quant.uniform<i8<-127:127>:f32:3, {1.000000e+00,1.000000e+00,1.000000e+00}>
    // CHECK-DAG: %[[q_w2:.*]] = "tfl.quantize"(%[[w]]) <{qtype = tensor<64x3x3x3x!quant.uniform<i8<-127:127>:f32:0, {1.000000e+00,1.000000e+00,1.000000e+00
    // CHECK-DAG: %[[dq_w1:.*]] = "tfl.dequantize"(%[[q_w1]])
    // CHECK-DAG: %[[dq_w2:.*]] = "tfl.dequantize"(%[[q_w2]])
    - Last Modified: Thu May 02 09:41:17 UTC 2024 (38.2K bytes)
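
    The two CHECK-DAG lines above differ only in the quantized dimension of the per-axis type (":3" versus ":0"). As a reading aid, a minimal QDQ sketch with a per-axis type; the shape and unit scales are illustrative, not taken from the test:

      %w = arith.constant dense<1.000000e+00> : tensor<2x3xf32>
      // i8 storage clamped to [-127, 127], expressed as f32, quantized along
      // dimension 0, so one scale per row (two scales for a 2x3 tensor).
      %q = "tfl.quantize"(%w) <{qtype = tensor<2x3x!quant.uniform<i8<-127:127>:f32:0, {1.0,1.0}>>}> : (tensor<2x3xf32>) -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:0, {1.0,1.0}>>
      %dq = "tfl.dequantize"(%q) : (tensor<2x3x!quant.uniform<i8<-127:127>:f32:0, {1.0,1.0}>>) -> tensor<2x3xf32>
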
  2. tensorflow/compiler/mlir/lite/quantization/tensorflow/tf_to_quant.cc

      }
    };
    
    // Inserts a "tfl.quantize" and "tfl.dequantize" op pair (QDQs) after the
    // "tf.FakeQuantWithMinMaxVarsOp" to be constant folded. Since the constant
// folding logic will use an "arith.constant" op to replace the
    // "tf.FakeQuantWithMinMaxVarsOp", the "tfl.quantize" op is used to preserve
    // the quantization parameters as a TypeAttr and "tfl.dequantize" op used to
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024 (8.1K bytes)
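
    The comment above gives the mechanism: the "tf.FakeQuantWithMinMaxVarsOp" folds into an "arith.constant", and the quantize/dequantize pair keeps the quantization parameters alive as a qtype TypeAttr. A hedged sketch of the resulting IR, with the shape and scale borrowed from the CHECK lines in result 4 below:

      %cst = arith.constant dense<0.000000e+00> : tensor<8xf32>
      // The qtype TypeAttr carries the scale/zero-point recovered from the
      // folded FakeQuant op's min/max range.
      %q = "tfl.quantize"(%cst) <{qtype = tensor<8x!quant.uniform<u8:f32, 1.0>>}> : (tensor<8xf32>) -> tensor<8x!quant.uniform<u8:f32, 1.0>>
      %dq = "tfl.dequantize"(%q) : (tensor<8x!quant.uniform<u8:f32, 1.0>>) -> tensor<8xf32>
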
  3. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range.mlir

    // RUN: tf-opt %s -tfl-prepare-quantize-dynamic-range -tfl-quantize="enable-dynamic-range-quantization=true" | FileCheck %s
    // RUN: tf-opt %s -tfl-prepare-quantize-dynamic-range -tfl-quantize="enable-dynamic-range-quantization=true enable-weight-only-quantization=true" | FileCheck --check-prefix=PerChannelWeightOnly %s
    - Last Modified: Thu May 23 21:09:00 UTC 2024 (23.2K bytes)
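
    Both RUN lines feed the same file through tf-opt with different pass options; the --check-prefix flag then selects which annotations FileCheck matches, so one test encodes expectations for several pipelines. Illustratively (function name hypothetical):

      // Matched by the first RUN line, which uses FileCheck's default prefix:
      // CHECK-LABEL: someFunc
      // Matched only by the second RUN line, via --check-prefix=PerChannelWeightOnly:
      // PerChannelWeightOnly-LABEL: someFunc
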
  4. tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant.mlir

      func.return %rst : tensor<8xf32>
    
    // CHECK: %[[CONSTANT:.*]] = arith.constant dense<0.000000e+00> : tensor<8xf32>
    // CHECK: %[[QUANTIZE:.*]] = "tfl.quantize"(%[[CONSTANT]]) <{qtype = tensor<8x!quant.uniform<u8:f32, 1.000000e+00>>}>
    // CHECK: %[[DEQUANTIZE:.*]] = "tfl.dequantize"(%[[QUANTIZE]])
    // CHECK: return %[[DEQUANTIZE]] : tensor<8xf32>
    }
    
    // CHECK-LABEL: fakeQuantFolded
    func.func @fakeQuantFolded() -> (tensor<8xf32>) {
    - Last Modified: Thu May 02 09:41:17 UTC 2024 (20.4K bytes)
  5. tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant-4bit.mlir

      func.return %rst : tensor<8xf32>
    
    // CHECK: %[[CONSTANT:.*]] = arith.constant dense<0.000000e+00> : tensor<8xf32>
    // CHECK: %[[QUANTIZE:.*]] = "tfl.quantize"(%[[CONSTANT]]) <{qtype = tensor<8x!quant.uniform<u4:f32, 1.000000e+00>>}>
    // CHECK: %[[DEQUANTIZE:.*]] = "tfl.dequantize"(%[[QUANTIZE]])
    // CHECK: return %[[DEQUANTIZE]] : tensor<8xf32>
    }
    
    // CHECK-LABEL: fakeQuantFolded
    func.func @fakeQuantFolded() -> (tensor<8xf32>) {
    - Last Modified: Thu May 02 09:41:17 UTC 2024 (22K bytes)
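
    Result 5 is the 4-bit sibling of result 4: the two tests differ essentially in the storage type of the folded quantized tensor, as their CHECK lines show side by side.

      // 8-bit variant (prepare-tf-fake-quant.mlir):
      //   tensor<8x!quant.uniform<u8:f32, 1.000000e+00>>
      // 4-bit variant (prepare-tf-fake-quant-4bit.mlir):
      //   tensor<8x!quant.uniform<u4:f32, 1.000000e+00>>
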
  6. tensorflow/compiler/mlir/lite/tests/post-quantize.mlir

      %2 = "tfl.dequantize"(%1#0) : (tensor<2x!quant.uniform<u8:f32, 1.0>>) -> tensor<2xf32>
      %3 = "tfl.dequantize"(%1#1) : (tensor<2x!quant.uniform<u8:f32, 1.0>>) -> tensor<2xf32>
    
      // unused quantization ops should be removed as well.
      %4 = "tfl.dequantize"(%1#2) : (tensor<2x!quant.uniform<u8:f32, 1.0>>) -> tensor<2xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024 (19.9K bytes)
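
    The inline comment marks the point of the test: %4 has no users, so the post-quantize pass should erase it while keeping the dequantize ops whose results are still consumed. A minimal before/after sketch (the op line is copied from the excerpt; the surrounding uses are assumed):

      // Before -tfl-post-quantize: %4 is dead.
      %4 = "tfl.dequantize"(%1#2) : (tensor<2x!quant.uniform<u8:f32, 1.0>>) -> tensor<2xf32>
      // After: %4 is removed; %2 and %3 survive because their results are used.
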
  7. tensorflow/compiler/mlir/lite/tests/post-quantize-dynamic-range.mlir

    // RUN: tf-opt %s -tfl-prepare-quantize-dynamic-range="enable-custom-op-quantization=CustomTestOp=1" -tfl-quantize="enable-dynamic-range-quantization=true enable-custom-op-weight-only=CustomTestOp=false" -tfl-post-quantize="enable-no-side-effect=CustomTestOp=false" | FileCheck --check-prefix=NotPrune %s
    - Last Modified: Thu May 02 09:41:17 UTC 2024 (11.4K bytes)
  8. tensorflow/compiler/mlir/lite/transforms/optimize_op_order.cc

    // Dequantize ops will produce 3x larger tensors, so we want to move it after
    // some passthrough ops to reduce the memory consumption.
    struct PushDownDequantize : public OpRewritePattern<DequantizeOp> {
      explicit PushDownDequantize(MLIRContext* context)
          : OpRewritePattern<DequantizeOp>(context) {}
    
      LogicalResult matchAndRewrite(DequantizeOp dequantize_op,
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024 (5.1K bytes)
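
    The pattern keeps a passthrough op operating on the narrow quantized tensor and re-emits the dequantize on its result. A hedged before/after sketch, assuming "tfl.reshape" counts as a passthrough op here (shapes and scale illustrative):

      // Before: the reshape consumes the wide f32 tensor.
      %dq = "tfl.dequantize"(%q) : (tensor<1x4x!quant.uniform<i8:f32, 1.0>>) -> tensor<1x4xf32>
      %r = "tfl.reshape"(%dq, %shape) : (tensor<1x4xf32>, tensor<1xi32>) -> tensor<4xf32>
      // After: the reshape stays on the i8 tensor; the dequantize moves below it.
      %r2 = "tfl.reshape"(%q, %shape) : (tensor<1x4x!quant.uniform<i8:f32, 1.0>>, tensor<1xi32>) -> tensor<4x!quant.uniform<i8:f32, 1.0>>
      %dq2 = "tfl.dequantize"(%r2) : (tensor<4x!quant.uniform<i8:f32, 1.0>>) -> tensor<4xf32>
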
  9. tensorflow/compiler/mlir/lite/tests/decompose-hybrid-quantization.mlir

      // CHECK-DAG: %[[VAL2:.+]] = "tfl.dequantize"(%[[VAL0]])
      // CHECK-DAG: %[[VAL3:.+]] = "tfl.dequantize"(%[[VAL1]])
      // CHECK-DAG: %[[VAL4:.+]] = "tfl.conv_2d"(%arg0, %[[VAL2]], %[[VAL3]]) <{dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32}>
    - Last Modified: Thu May 02 09:41:17 UTC 2024 (13.1K bytes)
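
    The CHECK lines show the shape of the decomposition: each quantized operand is dequantized individually, and the conv then runs entirely in float. Reduced to its core (operand names and quantized types hypothetical):

      // Quantized weights and bias are turned back into float...
      %w_f = "tfl.dequantize"(%w_q) : (tensor<16x1x1x8x!quant.uniform<i8:f32, 1.0>>) -> tensor<16x1x1x8xf32>
      %b_f = "tfl.dequantize"(%b_q) : (tensor<16x!quant.uniform<i32:f32, 1.0>>) -> tensor<16xf32>
      // ...so the hybrid "tfl.conv_2d" becomes an ordinary float conv.
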
  10. tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc

      }
      TypeAttr type_attr = TypeAttr::get(new_type);
      auto quantize = builder.create<TFL::QuantizeOp>(value.getLoc(), new_type,
                                                      value, type_attr);
      auto dequantize = builder.create<TFL::DequantizeOp>(
          value.getLoc(), expressed_type, quantize.getOutput());
      value.replaceAllUsesWith(dequantize);
    
      // `quantize` is using `dequantize` now, so we should set its operand to
      // `value`.
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024 (9.4K bytes)
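
    The excerpt ends on the classic rewrite pitfall the comment is about: replaceAllUsesWith also rewires the operand of the freshly created quantize op, producing a quantize(dequantize(...)) cycle, so the pass must point the quantize back at the original value. The intended IR (types illustrative):

      // Every old use of %value now reads %dq, but %q itself must still
      // consume the original %value rather than %dq.
      %q = "tfl.quantize"(%value) <{qtype = tensor<2x!quant.uniform<u8:f32, 1.0>>}> : (tensor<2xf32>) -> tensor<2x!quant.uniform<u8:f32, 1.0>>
      %dq = "tfl.dequantize"(%q) : (tensor<2x!quant.uniform<u8:f32, 1.0>>) -> tensor<2xf32>
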