Results 31 - 40 of 40 for getDefiningOp (0.32 sec)

  1. tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.cc

    
    OpFoldResult StorageCastOp::fold(FoldAdaptor) {
      // Matches x -> [scast -> scast] -> y, replacing the second scast with the
      // value of x if the casts invert each other.
      auto srcScastOp = getArg().getDefiningOp<StorageCastOp>();
      if (!srcScastOp || srcScastOp.getArg().getType() != getType())
        return OpFoldResult();
      return srcScastOp.getArg();
    }
    
    /// The quantization specification should match the expressed type.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.3K bytes
    - Viewed (0)
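A note on result 1: the templated form value.getDefiningOp<StorageCastOp>() combines a plain getDefiningOp() with a dyn_cast, so it yields a null op when the value is a block argument or is produced by a different kind of operation. A minimal sketch of the same fold shape, written against a hypothetical single-operand, single-result cast op CastOp rather than the actual quantization dialect:

    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Value.h"

    // Illustration only: return the value feeding a pair of mutually
    // inverting casts, or a null Value if the pattern does not match.
    template <typename CastOp>
    mlir::Value foldInverseCasts(CastOp op) {
      // Null if the operand is a block argument or defined by another op kind.
      auto producer = op->getOperand(0).template getDefiningOp<CastOp>();
      if (!producer ||
          producer->getOperand(0).getType() != op->getResult(0).getType())
        return mlir::Value();
      return producer->getOperand(0);
    }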
  2. tensorflow/compiler/mlir/lite/utils/utils.td

    // of our files will have access to `OpHasSameStaticShapes` when including files
    // generated from table-gen.
    def OpHasSameStaticShapesPred : CPred<"OpHasSameStaticShapes($0.getDefiningOp())">;
    def OpHasSameStaticShapes : Constraint<OpHasSameStaticShapesPred, "op must have static same input shapes">;
    def OpHasNotSameStaticShapes : Constraint<Neg<OpHasSameStaticShapesPred>, "op must have not static same input shapes">;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 4.8K bytes
    - Viewed (0)
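The TableGen constraint in result 2 delegates to a C++ helper that the snippet does not show. A rough sketch of what such a "same static shapes" check typically looks like (the name and exact semantics below are assumptions, not the TFLite implementation):

    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/Operation.h"

    // Illustration only: every operand must be a ranked tensor with a fully
    // static shape, and all operand shapes must be identical.
    bool allOperandsHaveSameStaticShape(mlir::Operation *op) {
      if (op->getNumOperands() == 0) return true;
      auto first =
          mlir::dyn_cast<mlir::RankedTensorType>(op->getOperand(0).getType());
      if (!first || !first.hasStaticShape()) return false;
      for (mlir::Value operand : op->getOperands()) {
        auto type = mlir::dyn_cast<mlir::RankedTensorType>(operand.getType());
        if (!type || !type.hasStaticShape() ||
            type.getShape() != first.getShape())
          return false;
      }
      return true;
    }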
  3. tensorflow/compiler/mlir/tensorflow/transforms/fold_broadcast.cc

      for (uint64_t i = 0, e = op->getNumOperands(); i < e; ++i) {
        // Check that the i'th operand is a broadcast.
        auto broadcast = llvm::dyn_cast_or_null<TF::BroadcastToOp>(
            op->getOpOperand(i).get().getDefiningOp());
        if (!broadcast) continue;
    
        // Check that the operand of the broadcast has fully defined shape.
        auto broadcast_arg_type = mlir::dyn_cast_or_null<RankedTensorType>(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.9K bytes
    - Viewed (0)
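Result 3 uses llvm::dyn_cast_or_null because getDefiningOp() returns nullptr for block arguments. The same tolerance can be expressed dialect-agnostically; the helper below is a hypothetical sketch, not code from fold_broadcast.cc:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/ADT/StringRef.h"
    #include "mlir/IR/Operation.h"

    // Illustration only: collect every operand of `op` whose defining
    // operation has the given name. The explicit null check plays the role
    // of dyn_cast_or_null in the snippet above.
    llvm::SmallVector<mlir::Operation *> producersNamed(mlir::Operation *op,
                                                        llvm::StringRef name) {
      llvm::SmallVector<mlir::Operation *> producers;
      for (mlir::Value operand : op->getOperands()) {
        mlir::Operation *def = operand.getDefiningOp();
        if (def && def->getName().getStringRef() == name)
          producers.push_back(def);
      }
      return producers;
    }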
  4. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc

      LogicalResult matchAndRewrite(TFL::DequantizeOp dequant_op,
                                    PatternRewriter& rewriter) const override {
        // We only fold i32 -> float pattern.
        auto input = dequant_op.getInput().getDefiningOp();
        if (!input) return failure();
    
        auto input_dequant = llvm::dyn_cast_or_null<TFL::QConstOp>(input);
        if (!input_dequant) return failure();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.4K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/utils/convert_type.cc

      if (shaped_type) {
        return shaped_type.getElementType();
      } else {
        return type;
      }
    }
    
    bool NotFromQuantOpOrSameQuantType(mlir::Value val, mlir::TypeAttr qtype_attr) {
      auto val_defn_op = val.getDefiningOp();
      mlir::TFL::QuantizeOp q_op =
          llvm::dyn_cast_or_null<mlir::TFL::QuantizeOp>(val_defn_op);
      if (!q_op) return true;
    
      // Ignore shape details - we're really only trying to
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 07 23:04:40 UTC 2024
    - 8.2K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/transforms/replicate_invariant_op_hoisting.cc

        shape_op.setOperand(replicate_op.GetReplicaOperandForBlockArgument(
            block_arg, /*replica=*/0));
    
        return;
      }
    
      Operation* input_def = input.getDefiningOp();
    
      // If ShapeOp operand is a ReadVariableOp result where the ReadVariableOp
      // operand is a replicate resource block argument, replace ShapeOp with
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.5K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/transforms/extract_tpu_copy_with_dynamic_shape_op.cc

    // returns the owner of the Block.
    Operation* GetOpOfValue(Value value) {
      if (auto block_arg = mlir::dyn_cast<BlockArgument>(value))
        return block_arg.getOwner()->getParentOp();
    
      return value.getDefiningOp();
    }
    
    // Check if the TPUCopyWithDynamicShapeOp is valid.
    // 1. The op should be wrapped inside a launch op.
    // 2. The wrapped launch op should be placed on CPU.
    LogicalResult CheckOpIsValid(Operation* op) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.6K bytes
    - Viewed (0)
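GetOpOfValue in result 7 resolves a Value to an operation whether it is an op result or a block argument. One way such a helper tends to be used is to compare where two values live; the check below is an assumed usage example, not part of the pass:

    #include "mlir/IR/Block.h"
    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Value.h"

    // Illustration only: resolve each value to an operation (its defining op,
    // or the parent of the block owning a block argument), then compare the
    // operations' parents.
    static mlir::Operation *resolveToOp(mlir::Value value) {
      if (auto block_arg = mlir::dyn_cast<mlir::BlockArgument>(value))
        return block_arg.getOwner()->getParentOp();
      return value.getDefiningOp();
    }

    bool definedUnderSameParent(mlir::Value a, mlir::Value b) {
      mlir::Operation *op_a = resolveToOp(a);
      mlir::Operation *op_b = resolveToOp(b);
      return op_a && op_b && op_a->getParentOp() == op_b->getParentOp();
    }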
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/convert_func_to_bfloat16.cc

      if (!converter.isLegal(convert_op.getOperand().getType())) {
        auto other_convert_op = dyn_cast_or_null<OtherConvertOp>(
            convert_op.getOperand().getDefiningOp());
        return other_convert_op &&
               converter.isLegal(other_convert_op.getOperand().getType());
      } else if (!converter.isLegal(convert_op.getResult().getType())) {
        if (!convert_op.getResult().hasOneUse()) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.3K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc

      for (int i = 0; i != num_return_operands; ++i) {
        auto returned_value = terminator->getOperand(i);
        Type returned_type = returned_value.getType();
        Operation* returned_op = returned_value.getDefiningOp();
        if (returned_op && llvm::isa<DequantizeOp>(returned_op)) {
          auto dequantize_op = llvm::cast<DequantizeOp>(returned_op);
          auto dequantize_input = dequantize_op.getInput();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.9K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/stablehlo/odml_converter/transforms/outline_composites.cc

    Operation* GetInputOpWithOneUse(Operation* op, int opr_num) {
      if (opr_num >= op->getNumOperands()) return nullptr;
      auto opr = op->getOperand(opr_num);
      if (llvm::isa<BlockArgument>(opr)) return nullptr;
      auto* res = opr.getDefiningOp();
      if (!res->hasOneUse()) return nullptr;
      return res;
    }
    
    // Checks if the given operand of given operation refers to a splat constant
    // with given val.
    bool HasSplatArg(Operation* op, float val, int opr_num) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 9.6K bytes
    - Viewed (0)
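The body of HasSplatArg in result 10 is truncated in the snippet. A minimal sketch of how such a check is commonly written with MLIR's matchers (an illustration under that assumption, not the file's actual code) follows; matchPattern walks to the operand's defining op internally:

    #include "llvm/ADT/APFloat.h"
    #include "mlir/IR/BuiltinAttributes.h"
    #include "mlir/IR/Matchers.h"
    #include "mlir/IR/Operation.h"

    // Illustration only: require that the opr_num'th operand is a constant
    // whose value is an f32 splat equal to val.
    bool hasSplatArgSketch(mlir::Operation *op, float val, int opr_num) {
      if (opr_num >= static_cast<int>(op->getNumOperands())) return false;
      mlir::DenseElementsAttr attr;
      if (!mlir::matchPattern(op->getOperand(opr_num), mlir::m_Constant(&attr)))
        return false;
      if (!attr.isSplat() || !attr.getElementType().isF32()) return false;
      return attr.getSplatValue<llvm::APFloat>().convertToFloat() == val;
    }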