Results 41 - 50 of 203 for dequantize (0.33 sec)

  1. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d.mlir

      // CHECK:      {
      // CHECK-NEXT:  version: 3,
      // CHECK-NEXT:  operator_codes: [ {
      // CHECK-NEXT:    deprecated_builtin_code: 6,
      // CHECK-NEXT:    version: 1
      // CHECK-NEXT:    builtin_code: DEQUANTIZE
      // CHECK-NEXT:  }, {
      // CHECK-NEXT:    deprecated_builtin_code: 4,
      // CHECK-NEXT:    version: 1
      // CHECK-NEXT:    builtin_code: DEPTHWISE_CONV_2D
      // CHECK-NEXT:  } ],
      // CHECK-NEXT:  subgraphs: [ {
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 9K bytes
  2. tensorflow/compiler/mlir/quantization/stablehlo/passes/post_quantize.cc

          if (!q->getAttr(kVolatileOpAttrName)) return failure();
    
          // If the quantize op is a requantize op, it is being used in other scale
          // adjustments and should be kept. Instead, move dequantize op before the
          // requantize op to remove the unnecessary requantize op.
          if (const QuantizedType qtype =
                  QuantizedType::getQuantizedElementType(q.getArg().getType())) {
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 6.3K bytes
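
    For context on the requantize case mentioned in the comment: a requantize re-expresses values already stored as integers under one (scale, zero point) pair using a different pair, which is numerically the same as dequantizing and then quantizing again. A minimal Python/NumPy sketch of that arithmetic (the parameter values are illustrative and not taken from this pass):

      import numpy as np

      def dequantize(q, scale, zero_point):
          # Affine dequantization: real_value = scale * (stored_int - zero_point).
          return scale * (q.astype(np.float32) - zero_point)

      def quantize(x, scale, zero_point, qmin=-128, qmax=127):
          # Affine quantization to int8 with round-to-nearest and clamping.
          q = np.round(x / scale) + zero_point
          return np.clip(q, qmin, qmax).astype(np.int8)

      def requantize(q, in_scale, in_zp, out_scale, out_zp):
          # Same effect as a dequantize followed by a quantize with the new parameters.
          return quantize(dequantize(q, in_scale, in_zp), out_scale, out_zp)

      q = np.array([-120, -4, 0, 56, 126], dtype=np.int8)
      print(requantize(q, in_scale=0.5, in_zp=0, out_scale=1.0, out_zp=10))
      # [-50   8  10  38  73] -- the same real values re-encoded with scale 1.0, zero point 10
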
  3. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/quantization.mlir

    // CHECK-NEXT:    version: 1,
    // CHECK-NEXT:    builtin_code: SOFTMAX
    // CHECK-NEXT:  }, {
    // CHECK-NEXT:    deprecated_builtin_code: 6,
    // CHECK-NEXT:    version: 1,
    // CHECK-NEXT:    builtin_code: DEQUANTIZE
    // CHECK-NEXT:  } ],
    // CHECK-NEXT:  subgraphs: [ {
    // CHECK-NEXT:    tensors: [ {
    // CHECK-NEXT:      shape: [ 1, 224, 224, 3 ],
    // CHECK-NEXT:      buffer: 1,
    // CHECK-NEXT:      name: "arg0",
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 11.9K bytes
  4. tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td

            "MLIR dump file name.">,
        Option<"merge_fusion_with_dequantize_",
            "merge-fusion-with-dequantize",
            "bool", /*default=*/"false",
            "Whether to merge quantized conv/dot_general fusion with subsequent dequantize.">,
      ];
      let dependentDialects = [
        "mlir::arith::ArithDialect",
        "mlir::stablehlo::StablehloDialect",
        "mlir::quant::QuantizationDialect",
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 10.3K bytes
  5. tensorflow/compiler/mlir/lite/utils/fake_quant_utils.cc

    // and tfl.dequantize pairs before tf.FakeQuant* being folded.
    LogicalResult ConvertFakeQuantOps(func::FuncOp func, MLIRContext* ctx,
                                      bool use_fake_quant_num_bits) {
      OpBuilder builder(func);
      if (failed(UnwrapTFCustomOps(func, builder))) {
        return failure();
      }
    
      // Insert the tfl.quantize/tfl.dequantize ops after the tf.FakeQuant* ops to
    - Last Modified: Sat Jun 03 00:14:05 UTC 2023
    - 4.3K bytes
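
    For background on the rewrite described above: a tf.FakeQuant* op behaves numerically like a quantize immediately followed by a dequantize over its [min, max] range, which is why the pass can model it as a tfl.quantize/tfl.dequantize pair. A simplified Python sketch of that equivalence (8-bit; it skips the "nudging" the real ops do to make 0.0 exactly representable, so it is an approximation only):

      import numpy as np

      def fake_quant_like(x, range_min, range_max, num_bits=8):
          levels = 2 ** num_bits - 1
          scale = (range_max - range_min) / levels
          q = np.clip(np.round((x - range_min) / scale), 0, levels)  # quantize step
          return q * scale + range_min                               # dequantize step

      x = np.array([-1.2, -0.4, 0.3, 1.7], dtype=np.float32)
      print(fake_quant_like(x, range_min=-1.0, range_max=1.0))
      # values are snapped to the 255-step grid and clipped to [-1.0, 1.0]
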
  6. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc

                               TFDynamicRangeQuantization>(ctx, quant_params) {}
    };
    
    // Removes quantize-dequantize pairs that are not used in the quantization.
    // The benefit of this pattern is set to a lower value than other patterns, so
    // that the other patterns can work on quantize/dequantize ops first.
    class RemoveUnusedQdqPattern
        : public OpRewritePattern<quantfork::DequantizeCastOp> {
     public:
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 23.6K bytes
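
    The pattern above is a graph cleanup: when a dequantize of a quantize only hands the original float value back to float consumers, the pair contributes nothing and can be bypassed. A toy illustration of the idea on an invented op representation (deliberately not MLIR; as the post_quantize_patterns.td result further down notes, the quantize/dequantize ops are modeled with side effects, so plain dead-code elimination would not remove them):

      # Toy "IR": op name -> (kind, operand name). Invented purely for illustration.
      ops = {
          "x":   ("arg",        None),
          "q0":  ("quantize",   "x"),
          "dq0": ("dequantize", "q0"),
          "y":   ("mul",        "dq0"),   # float consumer; the q0/dq0 pair is unused
      }

      def remove_unused_qdq(ops):
          # Rewire uses of dequantize(quantize(v)) back to v ...
          rewire = {name: ops[operand][1]
                    for name, (kind, operand) in ops.items()
                    if kind == "dequantize" and ops[operand][0] == "quantize"}
          ops = {name: (kind, rewire.get(operand, operand))
                 for name, (kind, operand) in ops.items()}
          # ... then iteratively drop quantize/dequantize ops nobody uses anymore.
          while True:
              used = {operand for _, operand in ops.values()}
              dead = [n for n, (kind, _) in ops.items()
                      if kind in ("quantize", "dequantize") and n not in used]
              if not dead:
                  return ops
              for n in dead:
                  del ops[n]

      print(remove_unused_qdq(ops))   # {'x': ('arg', None), 'y': ('mul', 'x')}
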
  7. tensorflow/compiler/mlir/lite/experimental/tac/transforms/transform_patterns.td

                      (Arith_ConstantOp ConstantAttr<RankedF32ElementsAttr<[]>,
                       "-1.0f">), TFL_AF_None), $act)>;
    
    // Squash tfl.dequantize and tfl.quantize pairs.
    // TODO(b/185915462): Compare the scale of input and output. This can also be
    // squashed to a requantize op if the scales are different.
    - Last Modified: Thu Sep 29 21:02:21 UTC 2022
    - 1.4K bytes
  8. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-quant.mlir

      // CHECK: %[[DEQUANTIZE:.*]] = mhlo.uniform_dequantize %[[CONVERT_2]] : (tensor<2x!quant.uniform<i8:f32, 1.000000e+00:3>>) -> tensor<2xf32>
      // CHECK: return %[[DEQUANTIZE]] : tensor<2xf32>
    
      %0 = "tf.UniformQuantize"(%arg0, %scales, %zps) {
        quantization_axis = -1 : i64, quantization_min_val = -128 : i64, quantization_max_val = 127 : i64
    - Last Modified: Thu May 09 01:25:29 UTC 2024
    - 37.3K bytes
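
    The quantized type in the CHECK line above, !quant.uniform<i8:f32, 1.000000e+00:3>, encodes i8 storage with scale 1.0 and zero point 3, and the op attributes pin the clamping range to [-128, 127]. A quick sketch of the uniform quantize/dequantize round trip with exactly those parameters (the input values are arbitrary):

      import numpy as np

      SCALE, ZERO_POINT = 1.0, 3    # from !quant.uniform<i8:f32, 1.000000e+00:3>
      QMIN, QMAX = -128, 127        # quantization_min_val / quantization_max_val

      def uniform_quantize(x):
          return np.clip(np.round(x / SCALE) + ZERO_POINT, QMIN, QMAX).astype(np.int8)

      def uniform_dequantize(q):
          return (q.astype(np.float32) - ZERO_POINT) * SCALE

      x = np.array([-5.0, 0.0, 7.4], dtype=np.float32)
      q = uniform_quantize(x)       # stored as int8: [-2, 3, 10]
      print(uniform_dequantize(q))  # [-5.  0.  7.] -- back in float32
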
  9. tensorflow/compiler/mlir/lite/transforms/post_quantize_patterns.td

    include "mlir/IR/OpBase.td"
    include "mlir/IR/PatternBase.td"
    include "mlir/Dialect/Func/IR/FuncOps.td"
    include "tensorflow/compiler/mlir/lite/ir/tfl_ops.td"
    
    // Both Quantize and Dequantize ops have side effects, so we have to define
    // patterns to remove dead ones after the quantization rewrite.
    def : Pat<(TFL_QuantizeOp:$op $in, $qt), (replaceWithValue $in), [(HasNoUseOf:$op)]>;
    - Last Modified: Wed Mar 16 23:20:46 UTC 2022
    - 1.2K bytes
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir

          } : (tensor<i8>, tensor<*xf32>, tensor<*xi32>) -> tensor<*xf32>
    
        %clamp_max = "tf.Maximum"(%dequantize, %clip_min) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
        %clamp_min = "tf.Minimum"(%clamp_max, %clip_max) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
    
        func.return %clamp_min : tensor<*xf32>
      }
    
      // Dequantizes and applies quantized Relu by clipping.
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 30.6K bytes
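
    The tail of the quantized Relu function above applies the activation as a clamp on the dequantized result: tf.Maximum against %clip_min, then tf.Minimum against %clip_max. A small sketch of that clamp step (the names mirror the snippet; the dequantize producing %dequantize and the actual clip values are not shown there, so the numbers below are made up):

      import numpy as np

      def clip_activation(dequantized, clip_min, clip_max):
          # Mirrors the snippet: Maximum(dequantize, clip_min), then Minimum(.., clip_max).
          clamp_max = np.maximum(dequantized, clip_min)
          clamp_min = np.minimum(clamp_max, clip_max)
          return clamp_min

      x = np.array([-4.0, -0.5, 2.0, 9.0], dtype=np.float32)
      print(clip_activation(x, clip_min=0.0, clip_max=6.0))   # [0. 0. 2. 6.]
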