Results 1 - 10 of 18 for QuantizeOp (0.2 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc

        return success();
      }
    };
    
    // If the quant op has no consumers, remove it.
    struct RemoveUnusedQuant : public OpRewritePattern<TFL::QuantizeOp> {
      using OpRewritePattern<TFL::QuantizeOp>::OpRewritePattern;
    
      LogicalResult matchAndRewrite(TFL::QuantizeOp quant_op,
                                    PatternRewriter& rewriter) const override {
        if (!quant_op.getResult().use_empty()) return failure();
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.4K bytes
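
A hedged sketch of how a dead-quantize cleanup of this shape typically finishes, since the excerpt cuts off inside matchAndRewrite. Only the standard MLIR pattern API is assumed; the struct name is illustrative, not the file's actual continuation.

    #include "mlir/IR/PatternMatch.h"
    #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"

    namespace mlir {
    namespace TFL {

    // Sketch only: erase a tfl.quantize op whose result has no users.
    struct RemoveDeadQuantizeSketch : public OpRewritePattern<TFL::QuantizeOp> {
      using OpRewritePattern<TFL::QuantizeOp>::OpRewritePattern;

      LogicalResult matchAndRewrite(TFL::QuantizeOp quant_op,
                                    PatternRewriter& rewriter) const override {
        // Only fire when nothing consumes the quantized value.
        if (!quant_op.getResult().use_empty()) return failure();
        // Erase through the rewriter so the pattern driver is notified.
        rewriter.eraseOp(quant_op);
        return success();
      }
    };

    }  // namespace TFL
    }  // namespace mlir
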
  2. tensorflow/compiler/mlir/lite/transforms/quantize.cc

    };
    
    class QuantizeConstPattern : public OpRewritePattern<QuantizeOp> {
     public:
      explicit QuantizeConstPattern(MLIRContext* context, bool legacy_float_scale)
          : OpRewritePattern<QuantizeOp>(context),
            legacy_float_scale_(legacy_float_scale) {}
      LogicalResult matchAndRewrite(QuantizeOp op,
                                    PatternRewriter& rewriter) const override {
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 13.3K bytes
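
For context, a pattern like QuantizeConstPattern is normally collected into a RewritePatternSet and applied by MLIR's greedy driver. A minimal sketch of that wiring, assuming the pattern is visible from the same file; the driver function name is illustrative.

    #include <utility>

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/MLIRContext.h"
    #include "mlir/IR/PatternMatch.h"
    #include "mlir/Transforms/GreedyPatternRewriteDriver.h"

    // Illustrative driver: register the pattern and rewrite until fixpoint.
    static void ApplyQuantizeConstPatterns(mlir::func::FuncOp func,
                                           bool legacy_float_scale) {
      mlir::MLIRContext* ctx = func.getContext();
      mlir::RewritePatternSet patterns(ctx);
      // QuantizeConstPattern takes the context plus the legacy-scale flag,
      // matching the constructor shown in the excerpt.
      patterns.add<QuantizeConstPattern>(ctx, legacy_float_scale);
      (void)mlir::applyPatternsAndFoldGreedily(func, std::move(patterns));
    }
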
  3. tensorflow/compiler/mlir/lite/transforms/post_quantize.cc

          arg.dropAllUses();
          bb.eraseArgument(0);
        };
    
        // This is looking for a pattern: arg -> tfl.quantize
        if (arg.hasOneUse() && llvm::isa<QuantizeOp>(*arg.user_begin())) {
          auto quantize_op = llvm::cast<QuantizeOp>(*arg.user_begin());
          remove_quantize_op(quantize_op);
          continue;
        }
    
    // Make a copy of the current argument and append it to the end of the list if
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.1K bytes
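
The remove_quantize_op callable used above is defined just before this excerpt and is not shown. A hedged guess at its shape, based only on what the loop needs: redirect users of the quantize result to the block argument, then erase the op. The real lambda in post_quantize.cc may also retype the argument; this is not a quote.

    #include "mlir/IR/Value.h"
    #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"

    // Assumed shape of the elided helper (illustrative, hypothetical name).
    static void RemoveArgQuantizeOp(mlir::BlockArgument arg,
                                    mlir::TFL::QuantizeOp quantize_op) {
      // Consumers of the quantized value now read the argument directly.
      quantize_op.getOutput().replaceAllUsesWith(arg);
      quantize_op.erase();
    }
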
  4. tensorflow/compiler/mlir/lite/transforms/quantize_variables.cc

        auto read_variable_op = dyn_cast_or_null<ReadVariableOp>(var_handle_user);
        if (!read_variable_op) continue;
        for (auto *read_variable_user : read_variable_op.getResult().getUsers()) {
          auto q_op = dyn_cast_or_null<QuantizeOp>(read_variable_user);
          if (!q_op || ref_qtype) continue;
          ref_qtype = q_op.getResult().getType();
        }
      }
      return ref_qtype;
    }
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.5K bytes
  5. tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.cc

          auto dcast = b.create<quantfork::DequantizeCastOp>(
              dq.getLoc(), dq.getOutput().getType(), dq.getInput());
          dq.getOutput().replaceAllUsesWith(dcast);
          dq.erase();
        } else if (auto q = llvm::dyn_cast<QuantizeOp>(op)) {
          auto qcast = b.create<quantfork::QuantizeCastOp>(
              q.getLoc(), q.getOutput().getType(), q.getInput());
          q.getOutput().replaceAllUsesWith(qcast);
          q.erase();
    - Last Modified: Mon Apr 22 02:50:01 UTC 2024
    - 3.5K bytes
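
The replacements above run inside a walk over the function body. A minimal sketch of that driving loop, assuming only the standard MLIR walk/builder APIs; the function name is illustrative and the rewrite bodies are the ones shown in the excerpt.

    #include "llvm/Support/Casting.h"
    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/Builders.h"
    #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"

    // Illustrative walk skeleton for a TFL quantize/dequantize conversion.
    void ConvertTflQuantOps(mlir::func::FuncOp func) {
      mlir::OpBuilder b(func);
      func.walk([&](mlir::Operation* op) {
        // New ops are created where the matched op currently sits.
        b.setInsertionPoint(op);
        if (auto dq = llvm::dyn_cast<mlir::TFL::DequantizeOp>(op)) {
          // ... build a quantfork::DequantizeCastOp and replace dq,
          // as shown in the excerpt ...
        } else if (auto q = llvm::dyn_cast<mlir::TFL::QuantizeOp>(op)) {
          // ... build a quantfork::QuantizeCastOp and replace q,
          // as shown in the excerpt ...
        }
      });
    }
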
  6. tensorflow/compiler/mlir/lite/experimental/tac/common/utils.cc

    #include "tensorflow/compiler/mlir/lite/utils/utils.h"
    
    namespace mlir {
    namespace TFL {
    namespace tac {
    
    bool NotTFLQuantDequantizeOp(Operation* op) {
      if (!op) return false;
      if (llvm::isa<TFL::QuantizeOp, TFL::DequantizeOp>(op)) return false;
      return true;
    }
    
    bool IsTerminatorOp(Operation* op) {
      if (!op) return false;
      return op->hasTrait<OpTrait::IsTerminator>();
    }
    
    - Last Modified: Wed Mar 06 05:37:07 UTC 2024
    - 2.3K bytes
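
A small usage sketch for the two predicates above: counting the ops in a function body that are neither TFL quantize/dequantize nor terminators. The helper below is illustrative, not part of utils.cc, and it assumes the declarations live in the matching utils.h header.

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "tensorflow/compiler/mlir/lite/experimental/tac/common/utils.h"

    // Illustrative: count ops the TAC predicates would let through.
    int CountNonQuantNonTerminatorOps(mlir::func::FuncOp func) {
      int count = 0;
      func.getBody().walk([&](mlir::Operation* op) {
        if (mlir::TFL::tac::NotTFLQuantDequantizeOp(op) &&
            !mlir::TFL::tac::IsTerminatorOp(op)) {
          ++count;
        }
      });
      return count;
    }
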
  7. tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc

        Value arg = block.getArgument(0);
        Type arg_type = arg.getType();
        Value new_arg = arg;
        Location loc = func.getLoc();
        if (arg.hasOneUse() && llvm::isa<QuantizeOp>(*arg.user_begin())) {
          auto quantize_op = llvm::cast<QuantizeOp>(*arg.user_begin());
          auto quantize_output = quantize_op.getOutput();
          auto current_type = quant::QuantizedType::getQuantizedElementType(
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.9K bytes
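
The call the excerpt is entering asks whether the quantize op's output carries a quantized element type. A tiny illustrative check in isolation; the helper name is made up, and the Quant dialect header path differs across MLIR versions.

    #include "mlir/Dialect/Quant/QuantTypes.h"  // newer MLIR: mlir/Dialect/Quant/IR/QuantTypes.h
    #include "mlir/IR/Types.h"

    // Illustrative: true if `t` is a quantized type, or a shaped type whose
    // element type is quantized.
    static bool HasQuantizedElementType(mlir::Type t) {
      mlir::quant::QuantizedType qtype =
          mlir::quant::QuantizedType::getQuantizedElementType(t);
      return static_cast<bool>(qtype);
    }
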
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h

    // A base rewrite pattern which matches any N-in-M-out operation with
    // quantization parameters propagated to at least one of its operands. The
    // quantization parameters are annotated by QuantizeOp/DequantizeOp pairs.
    // Each matched pattern is rewritten by its quantized alternative.
    //
    // The quantization method is determined by the `_quantization_method`
    // attribute attached to each quantizable unit.
    //
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.9K bytes
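
As a concrete illustration of the annotation described in that comment, a hedged sketch of reading the `_quantization_method` attribute from a matched op, assuming it is stored as a string attribute. The helper name is made up, and the real pass may parse the string further.

    #include <optional>

    #include "llvm/ADT/StringRef.h"
    #include "mlir/IR/BuiltinAttributes.h"
    #include "mlir/IR/Operation.h"

    // Illustrative: fetch the textual `_quantization_method` annotation, if any.
    static std::optional<llvm::StringRef> GetQuantizationMethodText(
        mlir::Operation* op) {
      if (auto attr =
              op->getAttrOfType<mlir::StringAttr>("_quantization_method")) {
        return attr.getValue();
      }
      return std::nullopt;
    }
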
  9. tensorflow/compiler/mlir/lite/utils/fake_quant_utils.h

                                    OpBuilder &rewriter) const {
        // We don't want to insert quantize/dequantize if the quantize op exists.
        auto res = tf_op.getOutputs();
        if (!res.hasOneUse() || isa<QuantizeOp>(*res.user_begin())) {
          return failure();
        }
    
    // Extract the min/max constant values from the operands. We also consider
    // a special case where there are tf.Identity ops between the min/max
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.6K bytes
  10. tensorflow/compiler/mlir/lite/utils/convert_type.cc

      } else {
        return type;
      }
    }
    
    bool NotFromQuantOpOrSameQuantType(mlir::Value val, mlir::TypeAttr qtype_attr) {
      auto val_defn_op = val.getDefiningOp();
      mlir::TFL::QuantizeOp q_op =
          llvm::dyn_cast_or_null<mlir::TFL::QuantizeOp>(val_defn_op);
      if (!q_op) return true;
    
      // Ignore shape details - we're really only trying to
      // check if quantization is the same.
    - Last Modified: Tue May 07 23:04:40 UTC 2024
    - 8.2K bytes
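
The excerpt stops just before the comparison it describes. A hedged sketch of a "same quantization, ignore shape" check, using getElementTypeOrSelf to strip any tensor shape; this is an assumption about the intent, not the file's actual continuation.

    #include "mlir/IR/BuiltinAttributes.h"
    #include "mlir/IR/TypeUtilities.h"
    #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"

    // Illustrative: treat two quantizations as equal if the element types
    // match, regardless of tensor shape.
    static bool SameQuantizedElementType(mlir::TFL::QuantizeOp q_op,
                                         mlir::TypeAttr qtype_attr) {
      mlir::Type lhs = mlir::getElementTypeOrSelf(q_op.getResult().getType());
      mlir::Type rhs = mlir::getElementTypeOrSelf(qtype_attr.getValue());
      return lhs == rhs;
    }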