Results 1 - 8 of 8 for kVolatileOpAttrName (0.32 sec)

  1. tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.cc

                                              dq.getArg());
          dq.getResult().replaceAllUsesWith(dcast);
          if (auto extra_attr = op->getAttr(mlir::quant::kVolatileOpAttrName)) {
            dcast->setAttr(mlir::quant::kVolatileOpAttrName, extra_attr);
          }
          dq.erase();
        } else if (auto q = llvm::dyn_cast<quantfork::QuantizeCastOp>(op)) {
          auto out_type = q.getResult().getType();
    - Last Modified: Mon Apr 22 02:50:01 UTC 2024
    - 3.5K bytes
  2. tensorflow/compiler/mlir/quantization/stablehlo/passes/post_quantize.cc

                                    PatternRewriter& rewriter) const override {
        auto input_op = op.getArg().getDefiningOp();
        if (auto q = llvm::dyn_cast_or_null<quantfork::QuantizeCastOp>(input_op)) {
          if (!q->getAttr(kVolatileOpAttrName)) return failure();
    
          // If the quantize op is a requantize op, it is being used in other scale
          // adjustments and should be kept. Instead, move dequantize op before the
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 6.3K bytes
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/post_quantize.cc

                                    PatternRewriter& rewriter) const override {
        auto input_op = op.getArg().getDefiningOp();
        if (auto q = llvm::dyn_cast_or_null<quantfork::QuantizeCastOp>(input_op)) {
          if (!q->getAttr(kVolatileOpAttrName)) return failure();
    
          if (remove_volatile_ops_type == kPreserveInputsAndOutputs) {
            // Don't remove leading and trailing QDQ for PTQ workflow, so the io
            // modifying lib can work correctly.
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 5.6K bytes
  4. tensorflow/compiler/mlir/quantization/stablehlo/passes/prepare_quantize.cc

        if (!preceding_qcast) return failure();
    
        auto new_qcast = rewriter.create<quantfork::QuantizeCastOp>(
            q_op.getLoc(), q_op.getType(), preceding_qcast.getArg());
        new_qcast->setAttr(kVolatileOpAttrName, rewriter.getUnitAttr());
        q_op->replaceAllUsesWith(new_qcast);
        return success();
      }
    };
    
    class ConvertTFConstOpToArithConstOp : public OpRewritePattern<TF::ConstOp> {
     public:
    - Last Modified: Wed Apr 03 05:11:03 UTC 2024
    - 8.1K bytes
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc

        if (!preceding_qcast) return failure();
    
        auto new_qcast = rewriter.create<quantfork::QuantizeCastOp>(
            q_op.getLoc(), q_op.getType(), preceding_qcast.getArg());
        new_qcast->setAttr(kVolatileOpAttrName, rewriter.getUnitAttr());
        q_op->replaceAllUsesWith(new_qcast);
        return success();
      }
    };
    
    bool PrepareQuantizePass::ContainsQuantizeOps(func::FuncOp func) {
      for (const auto& op : func.getOps()) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.2K bytes
  6. tensorflow/compiler/mlir/lite/transforms/post_quantize.cc

        auto input_op = op.getInput().getDefiningOp();
        if (auto q = llvm::dyn_cast_or_null<QuantizeOp>(input_op)) {
          if (!q->getAttr(mlir::quant::kVolatileOpAttrName)) return failure();
    
          if (remove_volatile_ops_type == kPreserveInputsAndOutputs) {
            // Don't remove leading and trailing QDQ for PTQ workflow, so the io
            // modifying lib can work correctly.
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.1K bytes
  7. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h

    namespace quant {
    
    // A unit attribute can be attached to the quantize/dequantize ops which are
    // added by the quantization passes. These ops can be removed without
    // losing accuracy.
    inline constexpr char kVolatileOpAttrName[] = "volatile";
    
    // Following attributes are used to mark ops that are not quantizable during
    // debug model generation process for whole-model verify mode. If these
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 41.7K bytes
  8. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc

      // quantization pass. These ops can be removed without losing original
      // program accuracy.
      // TODO: b/323478683 - Make the attribute being part of op definition.
      quantize->setAttr(kVolatileOpAttrName, builder_.getUnitAttr());
    
      // `original_result` is used by `quantize`, so this also replaces that use
      // with the result of `dequantize`; remember to reset that use afterwards.
      value.replaceAllUsesWith(dequantize);
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 38.1K bytes
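The snippets in results 7 and 8 describe the contract behind the attribute: a pass that introduces a quantize/dequantize pair tags the quantize op as volatile, so later passes may erase the pair without losing original program accuracy. The sketch below is a minimal illustration of that insertion step, not the actual QuantizationDriver code: the helper name InsertVolatileQdq and its signature are hypothetical, and the quantfork cast ops and mlir::quant::kVolatileOpAttrName are assumed to come from the TensorFlow MLIR headers listed above.

    #include "mlir/IR/Builders.h"
    // kVolatileOpAttrName is declared in the header from result 7:
    // tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
    // The quantfork QuantizeCastOp/DequantizeCastOp headers from the same tree
    // are assumed to be included as well.

    // Hypothetical helper: wrap `value` in a quantize/dequantize pair and mark
    // the quantize op as volatile.
    void InsertVolatileQdq(mlir::OpBuilder& builder, mlir::Location loc,
                           mlir::Value value, mlir::Type quantized_type) {
      auto quantize = builder.create<mlir::quantfork::QuantizeCastOp>(
          loc, quantized_type, value);
      auto dequantize = builder.create<mlir::quantfork::DequantizeCastOp>(
          loc, value.getType(), quantize.getResult());

      // The unit attribute marks the pair as introduced by quantization, so a
      // later pass may erase it without changing the original program.
      quantize->setAttr(mlir::quant::kVolatileOpAttrName, builder.getUnitAttr());

      // Route every existing use of `value` through the dequantized result ...
      value.replaceAllUsesWith(dequantize);
      // ... which also rewired the quantize op's own operand to `dequantize`,
      // creating a cycle; point that operand back at the original value.
      quantize->replaceUsesOfWith(dequantize, value);
    }

Result 1 shows the flip side of the same convention: when a conversion pass replaces one of these cast ops, it copies the attribute onto the replacement so the volatile marker survives the rewrite.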
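Results 2, 3, and 6 show the matching cleanup in the post-quantize passes: a dequantize whose producer is a quantize tagged as volatile can be folded back to the original float value. The pattern below is a minimal sketch of that idea under the same assumptions as the sketch above; the requantize and preserve-inputs/outputs special cases visible in the real passes are omitted, and the pattern name is hypothetical.

    #include "llvm/Support/Casting.h"
    #include "mlir/IR/PatternMatch.h"

    // Hypothetical pattern: fold a volatile quantize/dequantize pair away by
    // forwarding the float input of the quantize op.
    struct RemoveVolatileQdq
        : public mlir::OpRewritePattern<mlir::quantfork::DequantizeCastOp> {
      using mlir::OpRewritePattern<
          mlir::quantfork::DequantizeCastOp>::OpRewritePattern;

      mlir::LogicalResult matchAndRewrite(
          mlir::quantfork::DequantizeCastOp dq_op,
          mlir::PatternRewriter& rewriter) const override {
        auto q_op = llvm::dyn_cast_or_null<mlir::quantfork::QuantizeCastOp>(
            dq_op.getArg().getDefiningOp());
        // Only QDQ pairs that the quantization passes themselves introduced
        // (and therefore tagged as volatile) are safe to drop.
        if (!q_op || !q_op->getAttr(mlir::quant::kVolatileOpAttrName))
          return mlir::failure();

        // Bypass the pair: uses of the dequantized value are rewired to the
        // float input of the quantize op.
        rewriter.replaceOp(dq_op, q_op.getArg());
        return mlir::success();
      }
    };

The remove_volatile_ops_type check in results 3 and 6 is why the real patterns are more involved: leading and trailing QDQ pairs are kept in the PTQ workflow so the IO-modifying library can still work correctly.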