Results 11 - 20 of 85 for dequantize (0.16 sec)

  1. tensorflow/compiler/mlir/lite/transforms/quantize_patterns.td

    include "tensorflow/compiler/mlir/lite/ir/tfl_ops.td"
    
    // Quantize attribute $0 by using quantization parameter from $1.
    def QuantizeByQuantizedType : NativeCodeCall<"quant::Quantize($0, $1.getValue())">;
    def F32ElementsAttr : ElementsAttrBase<
      CPred<"$_self.cast<ElementsAttr>().getShapedType().getElementType().isF32()">, "float constant tensor">;
    
    // Squash tfl.dequantize and tfl.quantize pairs.
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 2.3K bytes
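
    The squash that this pattern file implements in TableGen DRR can also be
    pictured as a C++ rewrite. A minimal sketch, assuming the generated
    getInput() accessors on TFL::QuantizeOp/TFL::DequantizeOp (an
    illustration, not the actual pass code):

        #include "mlir/IR/PatternMatch.h"
        #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"

        // Fold tfl.quantize(tfl.dequantize(x)) back to x when the
        // quantized types match exactly.
        struct SquashQuantizeDequantize
            : public mlir::OpRewritePattern<mlir::TFL::QuantizeOp> {
          using OpRewritePattern::OpRewritePattern;

          mlir::LogicalResult matchAndRewrite(
              mlir::TFL::QuantizeOp quantize_op,
              mlir::PatternRewriter& rewriter) const override {
            auto dequantize_op =
                quantize_op.getInput().getDefiningOp<mlir::TFL::DequantizeOp>();
            if (!dequantize_op) return mlir::failure();
            // Different quantized types would need a requantize instead
            // (see the TODO in result 10 below).
            if (dequantize_op.getInput().getType() != quantize_op.getType())
              return mlir::failure();
            rewriter.replaceOp(quantize_op, dequantize_op.getInput());
            return mlir::success();
          }
        };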
  2. tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h

    // After applying the function, quantize/dequantize functions are created,
    // where the body of each function contains a specific quantization
    // algorithm. The input of the quantize function has one operand of
    // IsValueWithQuantizablePrecision, and the output is a tensor with a
    // supported quantized precision (like int8). For the dequantize function,
    // it is the other way around.
    
    - Last Modified: Sun Mar 24 07:44:40 UTC 2024
    - 1.9K bytes
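
    As a concrete illustration of the two directions described above,
    per-tensor affine int8 quantization and its inverse follow the standard
    formula below (a sketch of the usual math, not the generated function
    bodies):

        #include <algorithm>
        #include <cmath>
        #include <cstdint>

        // q = round(x / scale) + zero_point, clamped to the int8 range.
        int8_t Quantize(float x, float scale, int32_t zero_point) {
          const int32_t q =
              static_cast<int32_t>(std::lround(x / scale)) + zero_point;
          return static_cast<int8_t>(std::clamp(q, -128, 127));
        }

        // The other way around: x ~= (q - zero_point) * scale.
        float Dequantize(int8_t q, float scale, int32_t zero_point) {
          return (static_cast<int32_t>(q) - zero_point) * scale;
        }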
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc

        auto op_before_dequantize = original_dequantize_op.getOperand(0);
    
        // Create a new dequantize op that is propagated.
        rewriter.setInsertionPointAfter(user_op);
        TF::PartitionedCallOp new_dequantize_op =
            cast<TF::PartitionedCallOp>(rewriter.clone(*original_dequantize_op));
    
        // Skip the original dequant op and connect the op before dequantize to the
        // user op.
        user_op->setOperand(user_idx, op_before_dequantize);
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7K bytes
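
    For context, the excerpt sits inside a rewrite that hoists a
    dequantizing tf.PartitionedCall past its user, so the user consumes the
    still-quantized value. A hedged skeleton of that rewiring (the
    single-use and single-result assumptions are mine, not the pass's):

        // Schematic only: assumes the dequantize has exactly one user and
        // that user produces a single result.
        mlir::LogicalResult PropagateDequantize(
            mlir::TF::PartitionedCallOp original_dequantize_op,
            mlir::PatternRewriter& rewriter) {
          if (!original_dequantize_op->hasOneUse()) return mlir::failure();
          mlir::OpOperand& use = *original_dequantize_op->use_begin();
          mlir::Operation* user_op = use.getOwner();
          const unsigned user_idx = use.getOperandNumber();

          mlir::Value op_before_dequantize =
              original_dequantize_op->getOperand(0);

          // Clone the dequantize after the user, as in the excerpt.
          rewriter.setInsertionPointAfter(user_op);
          auto new_dequantize_op = mlir::cast<mlir::TF::PartitionedCallOp>(
              rewriter.clone(*original_dequantize_op));

          // Downstream consumers of the user's result now read the new
          // dequantize's result.
          rewriter.replaceAllUsesWith(user_op->getResult(0),
                                      new_dequantize_op->getResult(0));

          // Skip the original dequant op: the user takes the quantized
          // value, and the clone dequantizes the user's result.
          user_op->setOperand(user_idx, op_before_dequantize);
          new_dequantize_op->setOperand(0, user_op->getResult(0));
          rewriter.eraseOp(original_dequantize_op);
          return mlir::success();
        }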
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/post_quantize.cc

                op->user_begin()->hasTrait<OpTrait::IsTerminator>())
              return failure();
          }
          // If the quantize op is a requantize op, it is being used in other scale
          // adjustments and should be kept. Instead, move the dequantize op before
          // the requantize op to remove the unnecessary requantize op.
          if (auto qtype =
                  QuantizedType::getQuantizedElementType(q.getArg().getType())) {
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 5.6K bytes
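
    The requantize the comment refers to is a quantize op whose input is
    already quantized, i.e. a pure rescaling between two parameter sets.
    Numerically (an illustration of the standard formula, not the pass's
    code):

        #include <algorithm>
        #include <cmath>
        #include <cstdint>

        // Requantize: dequantize with (s1, z1), then quantize with (s2, z2):
        //   q2 = round((q1 - z1) * s1 / s2) + z2
        // Dequantizing q2 with (s2, z2) approximates dequantizing q1 with
        // (s1, z1), which is why the dequantize can be moved in front of
        // the requantize and the requantize dropped.
        int8_t Requantize(int8_t q1, float s1, int32_t z1,
                          float s2, int32_t z2) {
          const float real_value = (q1 - z1) * s1;
          const int32_t q2 =
              static_cast<int32_t>(std::lround(real_value / s2)) + z2;
          return static_cast<int8_t>(std::clamp(q2, -128, 127));
        }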
  5. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d_v2.mlir

      // CHECK:      {
      // CHECK-NEXT:  version: 3,
      // CHECK-NEXT:  operator_codes: [ {
      // CHECK-NEXT:    deprecated_builtin_code: 6,
      // CHECK-NEXT:    version: 1,
      // CHECK-NEXT:    builtin_code: DEQUANTIZE
      // CHECK-NEXT:  }, {
      // CHECK-NEXT:    deprecated_builtin_code: 4,
      // CHECK-NEXT:    version: 2,
      // CHECK-NEXT:    builtin_code: DEPTHWISE_CONV_2D
      // CHECK-NEXT:  } ],
      // CHECK-NEXT:  subgraphs: [ {
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 9.1K bytes
  6. tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h

        // Finally, use the quantization parameter to create the quantize and
        // dequantize ops, and insert them between the tf.FakeQuantWithMinMaxVarsOp
        // and its users.
        auto quantize = rewriter.create<quantfork::QuantizeCastOp>(
            tf_op.getLoc(), qtype.getValue(), input);
        auto dequantize = rewriter.create<quantfork::DequantizeCastOp>(
            tf_op.getLoc(), res_type, quantize.getResult());
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.3K bytes
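
    A quantize/dequantize pair can stand in for tf.FakeQuantWithMinMaxVarsOp
    because fake quantization is itself a round trip through the quantized
    domain. A simplified scalar sketch (TensorFlow additionally nudges
    min/max so that zero is exactly representable; that step is omitted
    here):

        #include <algorithm>
        #include <cmath>

        // Fake quantization = quantize then dequantize: the input is
        // snapped to the nearest point on the [min, max] quantization grid.
        float FakeQuant(float x, float min, float max, int num_bits = 8) {
          const float qmin = 0.0f;
          const float qmax = static_cast<float>((1 << num_bits) - 1);
          const float scale = (max - min) / (qmax - qmin);
          const float zero_point = qmin - min / scale;
          const float q =
              std::clamp(std::round(x / scale + zero_point), qmin, qmax);
          return (q - zero_point) * scale;
        }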
  7. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d.mlir

      // CHECK:      {
      // CHECK-NEXT:  version: 3,
      // CHECK-NEXT:  operator_codes: [ {
      // CHECK-NEXT:    deprecated_builtin_code: 6,
      // CHECK-NEXT:    version: 1,
      // CHECK-NEXT:    builtin_code: DEQUANTIZE
      // CHECK-NEXT:  }, {
      // CHECK-NEXT:    deprecated_builtin_code: 4,
      // CHECK-NEXT:    version: 1,
      // CHECK-NEXT:    builtin_code: DEPTHWISE_CONV_2D
      // CHECK-NEXT:  } ],
      // CHECK-NEXT:  subgraphs: [ {
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 9K bytes
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/post_quantize.cc

          if (!q->getAttr(kVolatileOpAttrName)) return failure();
    
          // If the quantize op is a requantize op, it is being used in other scale
          // adjustments and should be kept. Instead, move dequantize op before the
          // requantize op to remove the unnecessary requantize op.
          if (const QuantizedType qtype =
                  QuantizedType::getQuantizedElementType(q.getArg().getType())) {
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 6.3K bytes
  9. tensorflow/compiler/mlir/lite/utils/fake_quant_utils.cc

    // and tfl.dequantize pairs before the tf.FakeQuant* ops are folded.
    LogicalResult ConvertFakeQuantOps(func::FuncOp func, MLIRContext* ctx,
                                      bool use_fake_quant_num_bits) {
      OpBuilder builder(func);
      if (failed(UnwrapTFCustomOps(func, builder))) {
        return failure();
      }
    
      // Insert the tfl.quantize/tfl.dequantize ops after the tf.FakeQuant* ops to
    - Last Modified: Sat Jun 03 00:14:05 UTC 2023
    - 4.3K bytes
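
    The excerpt elides the per-op rewriting that follows. Schematically, a
    driver of this shape gathers the fake-quant ops first (rewriting while
    walking would invalidate the traversal) and then rewrites each one into
    a quantize/dequantize pair; this skeleton is an assumption about the
    structure, not the file's code:

        #include "llvm/ADT/SmallVector.h"
        #include "mlir/Dialect/Func/IR/FuncOps.h"
        #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

        void RewriteFakeQuantOps(mlir::func::FuncOp func) {
          llvm::SmallVector<mlir::TF::FakeQuantWithMinMaxVarsOp> targets;
          func.walk([&](mlir::TF::FakeQuantWithMinMaxVarsOp op) {
            targets.push_back(op);
          });
          for (auto op : targets) {
            // Insert a tfl.quantize/tfl.dequantize pair after `op` and
            // reroute its users, as in the snippet from result 6 above.
            (void)op;  // placeholder for the per-op rewrite
          }
        }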
  10. tensorflow/compiler/mlir/lite/experimental/tac/transforms/transform_patterns.td

                      (Arith_ConstantOp ConstantAttr<RankedF32ElementsAttr<[]>,
                       "-1.0f">), TFL_AF_None), $act)>;
    
    // Squash tfl.dequantize and tfl.quantize pairs.
    // TODO(b/185915462): Compare the scale of input and output. This can also be
    // squashed to a requantize op if the scales are different.
    - Last Modified: Thu Sep 29 21:02:21 UTC 2022
    - 1.4K bytes
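
    The TODO's scale comparison amounts to a two-way decision. Sketched (an
    assumption about how it might look, not existing code):

        #include "mlir/IR/Types.h"

        enum class SquashKind { kIdentity, kRequantize };

        // When a tfl.quantize consumes a tfl.dequantize: identical
        // quantized types fold the pair away entirely; differing
        // scales/zero points still collapse the pair, but into a single
        // requantize op.
        SquashKind ClassifySquash(mlir::Type before_dequantize,
                                  mlir::Type after_quantize) {
          return before_dequantize == after_quantize
                     ? SquashKind::kIdentity
                     : SquashKind::kRequantize;
        }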