Results 21 - 30 of 81 for dequantize (0.3 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td

            "MLIR dump file name.">,
        Option<"merge_fusion_with_dequantize_",
            "merge-fusion-with-dequantize",
            "bool", /*default=*/"false",
            "Whether to merge quantized conv/dot_general fusion with subsequent dequantize.">,
      ];
      let dependentDialects = [
        "mlir::arith::ArithDialect",
        "mlir::stablehlo::StablehloDialect",
        "mlir::quant::QuantizationDialect",
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 10.3K bytes
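
    A TableGen Option entry like this surfaces as a flag on the generated pass. A sketch of an invocation through tf-opt; the option string is taken from the snippet, but the pass name is an assumption (it is defined elsewhere in passes.td):

        # Pass name is assumed; only merge-fusion-with-dequantize appears above.
        tf-opt model.mlir \
          --stablehlo-quantize-composite-functions='merge-fusion-with-dequantize=true'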
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc

                               TFDynamicRangeQuantization>(ctx, quant_params) {}
    };
    
    // Removes quantize-dequantize pairs that are not used in the quantization.
    // The benefit of this pattern is set to a lower value than other patterns, so
    // that the other patterns can work on quantize/dequantize ops first.
    class RemoveUnusedQdqPattern
        : public OpRewritePattern<quantfork::DequantizeCastOp> {
     public:
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 23.6K bytes
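
    A minimal sketch of what a pattern like RemoveUnusedQdqPattern does: a dequantize fed directly by a quantize is folded away so downstream ops read the original float value. QuantizeCastOp, DequantizeCastOp, and the getArg() accessor are assumed stand-ins for the quantfork ops:

        // Sketch, not the TF implementation: fold dq(q(x)) -> x.
        struct RemoveQdqPairSketch
            : public mlir::OpRewritePattern<DequantizeCastOp> {
          using mlir::OpRewritePattern<DequantizeCastOp>::OpRewritePattern;

          mlir::LogicalResult matchAndRewrite(
              DequantizeCastOp dq,
              mlir::PatternRewriter& rewriter) const override {
            // Only fire when the dequantize input comes straight from a quantize.
            auto q = dq.getArg().getDefiningOp<QuantizeCastOp>();
            if (!q) return mlir::failure();
            // Bypass the pair: users of the dequantize now read the float
            // value that originally fed the quantize.
            rewriter.replaceOp(dq, q.getArg());
            return mlir::success();
          }
        };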
  3. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-quant.mlir

      // CHECK: %[[DEQUANTIZE:.*]] = mhlo.uniform_dequantize %[[CONVERT_2]] : (tensor<2x!quant.uniform<i8:f32, 1.000000e+00:3>>) -> tensor<2xf32>
      // CHECK: return %[[DEQUANTIZE]] : tensor<2xf32>
    
      %0 = "tf.UniformQuantize"(%arg0, %scales, %zps) {
        quantization_axis = -1 : i64, quantization_min_val = -128 : i64, quantization_max_val = 127 : i64
    - Last Modified: Thu May 09 01:25:29 UTC 2024
    - 37.3K bytes
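
    The quantized type in the CHECK line, !quant.uniform<i8:f32, 1.000000e+00:3>, carries a scale of 1.0 and a zero point of 3. Uniform dequantization is the affine map real = scale * (stored - zero_point); a scalar sketch:

        #include <cstdint>

        // With scale = 1.0 and zero_point = 3 as above, a stored i8 value
        // of 10 dequantizes to 1.0f * (10 - 3) = 7.0f.
        float UniformDequantize(int8_t stored, float scale, int32_t zero_point) {
          return scale * (static_cast<int32_t>(stored) - zero_point);
        }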
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir

          } : (tensor<i8>, tensor<*xf32>, tensor<*xi32>) -> tensor<*xf32>
    
        %clamp_max = "tf.Maximum"(%dequantize, %clip_min) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
        %clamp_min = "tf.Minimum"(%clamp_max, %clip_max) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
    
        func.return %clamp_min : tensor<*xf32>
      }
    
      // Dequantizes and applies quantized Relu by clipping.
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 30.6K bytes
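
    The same computation as scalar C++, a sketch only: dequantize, then realize the quantized Relu as a clamp, taking tf.Maximum against clip_min before tf.Minimum against clip_max, in the same order as the snippet:

        #include <algorithm>
        #include <cstdint>

        // Dequantize, then Relu-as-clamp.
        float DequantizeThenClip(int8_t stored, float scale, int32_t zero_point,
                                 float clip_min, float clip_max) {
          float x = scale * (static_cast<int32_t>(stored) - zero_point);
          x = std::max(x, clip_min);     // tf.Maximum
          return std::min(x, clip_max);  // tf.Minimum
        }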
  5. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h

            // correct float op should be the user of the last DequantizeOp.
            if (llvm::isa<QuantizeOpT>(user)) {
              user = *user->getResult(0).getUsers().begin();
            }
            if (auto dequantize = llvm::dyn_cast<DequantizeOpT>(user)) {
          // Replace all uses, except non-quantizable ops that are used in
          // the float backbone.
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 41.7K bytes
  6. tensorflow/compiler/mlir/lite/tests/modify_io_nodes.mlir

      %6 = "tfl.dequantize"(%5) : (tensor<1x401408x!quant.uniform<i8:f32, 3.906250e-03>>) -> tensor<1x401408xf32>
      func.return %6 : tensor<1x401408xf32>
    
    // CHECK-LABEL: func @modified(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x401408xf32>
    // CHECK-NEXT: %[[shape:.*]] = arith.constant dense<[1, 401408]> : tensor<2xi32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 19.9K bytes
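
    The scale in this test, 3.906250e-03, is exactly 1/256 (2^-8), and the type spells out no zero point, which in !quant.uniform notation means zero. This tfl.dequantize therefore maps a stored i8 value q to q / 256.0f:

        #include <cstdint>

        // scale = 1/256, zero point = 0:
        // q = -128 -> -0.5f, q = 127 -> 0.49609375f.
        float DequantizeByScale256(int8_t q) { return q * 0.00390625f; }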
  7. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc

      // TODO: b/323478683 - Make the attribute part of the op definition.
      quantize->setAttr(kVolatileOpAttrName, builder_.getUnitAttr());
    
      // `original_result` has a use in `quantize`, so this will replace that use
      // with the result of `dequantize`. Remember to reset that use afterwards.
      value.replaceAllUsesWith(dequantize);
      quantize.getOperation()->replaceUsesOfWith(dequantize, value);
    }
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 38.1K bytes
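
    A self-contained sketch of the rewiring those two calls perform, with QuantizeOp and DequantizeOp as placeholder op names: replaceAllUsesWith redirects every user of value to the dequantize result, including the freshly created quantize, which briefly forms a cycle, and replaceUsesOfWith then points the quantize back at the original value:

        // Sketch; QuantizeOp/DequantizeOp and their build signatures are assumed.
        void InsertQdqAfter(mlir::Value value, mlir::Type quantized_type,
                            mlir::OpBuilder& builder, mlir::Location loc) {
          auto quantize = builder.create<QuantizeOp>(loc, quantized_type, value);
          auto dequantize =
              builder.create<DequantizeOp>(loc, value.getType(), quantize);
          // Every user of `value`, including `quantize` itself, now reads
          // the dequantized result; this briefly forms a cycle.
          value.replaceAllUsesWith(dequantize);
          // Break the cycle: `quantize` consumes the original value again.
          quantize.getOperation()->replaceUsesOfWith(dequantize, value);
        }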
  8. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir

        } : (tensor<*xf32>, tensor<*xf32>, tensor<*xi32>) -> tensor<*x!tf_type.qint32>
        func.return %quantize : tensor<*x!tf_type.qint32>
      }
    
      // Dequantize final graph output back to f32. Input is qint8.
      func.func @dequantize_i8(%input : tensor<*x!tf_type.qint8>, %input_scale : tensor<*xf32>, %input_zp : tensor<*xi32>) -> tensor<*xf32> {
        %dequantize = "tf.UniformDequantize"(%input, %input_scale, %input_zp) {
          Tin = "tfdtype$DT_QINT8",
    - Last Modified: Tue Aug 29 01:13:58 UTC 2023
    - 19.3K bytes
  9. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h

        if (failed(candidate_ops) || candidate_ops->empty()) return failure();
    
        // Rewrite the floating-point ops to the quantized version by fusing
        // preceding dequantize ops and succeeding quantize ops.
        for (Operation* candidate_op : *candidate_ops) {
          // If it is a requantize op, we shouldn't rewrite it.
          if (isa<QuantizeOpT, DequantizeOpT>(candidate_op)) {
            return failure();
          }
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.9K bytes
  10. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc

      quant::QuantizationSpecs quant_specs_;
    };
    
    #include "tensorflow/compiler/mlir/lite/utils/generated_op_quant_spec_getters.inc"
    
    // If the weight is applicable to dynamic range quantization, insert Quantize
    // and Dequantize ops with either per-axis or per-tensor scale.
    class PrepareDynamicRangeQuantizableOp
        : public OpRewritePattern<arith::ConstantOp> {
     public:
      explicit PrepareDynamicRangeQuantizableOp(
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20.8K bytes
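
    For the per-tensor case, dynamic range quantization amounts to picking scale = max|w| / 127 so the weights span the symmetric int8 range, then rounding each weight to scale units; the per-axis variant repeats this per output channel. A minimal sketch, not the TFLite implementation:

        #include <algorithm>
        #include <cmath>
        #include <cstdint>
        #include <vector>

        // Symmetric per-tensor int8; dequantize as values[i] * scale.
        struct QuantizedWeights {
          std::vector<int8_t> values;
          float scale;
        };

        QuantizedWeights QuantizePerTensor(const std::vector<float>& weights) {
          float max_abs = 0.0f;
          for (float w : weights) max_abs = std::max(max_abs, std::fabs(w));
          QuantizedWeights out;
          out.scale = max_abs > 0.0f ? max_abs / 127.0f : 1.0f;
          out.values.reserve(weights.size());
          for (float w : weights) {
            out.values.push_back(
                static_cast<int8_t>(std::lround(w / out.scale)));
          }
          return out;
        }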