Results 1 - 10 of 54 for CastOp (0.1 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_types.cc

          }
          // If the result is already consumed by a qint->int CastOp, manually
          // replace its use with the new UQ op. Because such a CastOp is already
          // legal, it will not go through any conversion pattern later. Without
          // this, that CastOp would still consume the original UQ op and cause errors.
          op->getResult(i).replaceUsesWithIf(
              new_op->getResult(i), [](OpOperand &operand) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 12.6K bytes
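
    The comment above describes retargeting only the uses owned by a cast. A minimal sketch of that replaceUsesWithIf idiom, assuming the TF dialect's CastOp and with an illustrative helper name, not the pass's own code:

    #include "mlir/IR/Value.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

    // Illustrative only: retarget uses of `old_result` to `new_result`, but
    // only where the consumer is a tf.Cast. Other uses are left untouched so
    // the normal conversion patterns can handle them later.
    void RetargetCastUses(mlir::Value old_result, mlir::Value new_result) {
      old_result.replaceUsesWithIf(new_result, [](mlir::OpOperand &operand) {
        return llvm::isa<mlir::TF::CastOp>(operand.getOwner());
      });
    }
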
  2. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc

          }
    
          return true;
        }
        return false;
      }
    
    // Insert a CastOp used for converting a float32 ConstantOp into
    // float16 quantization. If there is an existing CastOp connected to the
    // ConstantOp, the quantize_op will be rewired to the existing CastOp. This
    // guarantees at most one CastOp is created for float32 to float16 conversion.
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20.8K bytes
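
    As a hedged illustration of the get-or-create behavior the comment describes (the helper name, the reuse check, and the signature are assumptions, not the pass's actual code):

    #include "mlir/IR/Builders.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

    // Sketch: return an existing tf.Cast to the f16 type fed by `const_val`
    // if one is already there, otherwise create one, so that at most one cast
    // is created per constant.
    mlir::Value GetOrCreateF16Cast(mlir::OpBuilder& builder, mlir::Value const_val,
                                   mlir::Type f16_tensor_type) {
      for (mlir::Operation* user : const_val.getUsers()) {
        auto cast = llvm::dyn_cast<mlir::TF::CastOp>(user);
        if (cast && cast.getType() == f16_tensor_type) return cast;
      }
      builder.setInsertionPointAfterValue(const_val);
      return builder.create<mlir::TF::CastOp>(const_val.getLoc(), f16_tensor_type,
                                              const_val);
    }
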
  3. tensorflow/compiler/mlir/tfr/ir/tfr_ops.cc

    }
    
    class RemoveRedundantCast : public OpRewritePattern<CastOp> {
      using OpRewritePattern<CastOp>::OpRewritePattern;
    
     public:
      LogicalResult matchAndRewrite(CastOp cast_op,
                                    PatternRewriter &rewriter) const override {
        auto preceding_cast =
            llvm::dyn_cast_or_null<CastOp>(cast_op.getArg().getDefiningOp());
        if (!preceding_cast) {
          return failure();
    - Last Modified: Tue Nov 21 16:55:41 UTC 2023
    - 38.2K bytes
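
    The excerpt stops before the rewrite step. A simplified, self-contained sketch of the same cast-of-cast folding idea, written against the getArg accessor shown above and omitting the pass's additional checks:

    #include "mlir/IR/PatternMatch.h"
    #include "tensorflow/compiler/mlir/tfr/ir/tfr_ops.h"

    // Sketch only, not the file's actual RemoveRedundantCast: fold
    // cast(cast(x)) back to x when the outer result type matches the type x
    // had before the inner cast.
    struct FoldCastOfCast : public mlir::OpRewritePattern<mlir::TFR::CastOp> {
      using mlir::OpRewritePattern<mlir::TFR::CastOp>::OpRewritePattern;

      mlir::LogicalResult matchAndRewrite(
          mlir::TFR::CastOp cast_op,
          mlir::PatternRewriter& rewriter) const override {
        auto preceding_cast = llvm::dyn_cast_or_null<mlir::TFR::CastOp>(
            cast_op.getArg().getDefiningOp());
        if (!preceding_cast) return mlir::failure();
        mlir::Value original = preceding_cast.getArg();
        if (original.getType() != cast_op.getType()) return mlir::failure();
        rewriter.replaceOp(cast_op, original);
        return mlir::success();
      }
    };
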
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/cast_bf16_ops_to_f32.cc

     public:
      explicit CastBf16OpsToF32(MLIRContext* context)
          : RewritePattern(MatchAnyOpTypeTag(), /*benefit=*/1, context) {}
    
     private:
      LogicalResult match(Operation* op) const override {
        if (isa<TF::CastOp, TF::ConstOp>(op) ||
            op->getName().hasTrait<OpTrait::ZeroOperands>()) {
          return failure();
        }
        for (Value input : op->getOperands()) {
          if (getElementTypeOrSelf(input).isBF16()) {
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 4.5K bytes
  5. tensorflow/compiler/mlir/tfr/passes/raise_to_tf.cc

      // type.
      // TODO(fengliuai): This method is required when the operand types are not set
      // by the frontend correctly.
      Value CastToNonDerivedType(PatternRewriter& rewriter, Location loc,
                                 CastOp cast_op, Type input_tfr_type) const {
        auto tensor_type = mlir::dyn_cast<TFRTensorType>(input_tfr_type);
        if (!tensor_type) return cast_op.getArg();
    
        auto attr_names = tensor_type.getAttrKeys();
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 21.8K bytes
  6. tensorflow/compiler/mlir/tensorflow/transforms/tpu_space_to_depth_pass.cc

    // PadOp and CastOp.
    std::optional<BlockArgumentInfo> GetInputBlockArgNum(Value input) {
      auto block_arg_num = GetBlockArgNum(input);
      if (block_arg_num.has_value()) return block_arg_num;
    
      Value next_input = input;
      auto pad_op = dyn_cast_or_null<TF::PadOp>(next_input.getDefiningOp());
      auto cast_op = dyn_cast_or_null<TF::CastOp>(next_input.getDefiningOp());
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 29.3K bytes
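
    The snippet truncates before the traversal itself. A hedged sketch of the look-through idea (the helper name and the single-hop assumption are illustrative, not taken from the pass):

    #include <optional>

    #include "mlir/IR/Value.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

    // Sketch: look through one tf.Pad or tf.Cast producer and report the
    // argument number if the underlying value is a block argument.
    std::optional<unsigned> GetArgNumThroughPadOrCast(mlir::Value input) {
      if (auto pad = llvm::dyn_cast_or_null<mlir::TF::PadOp>(input.getDefiningOp()))
        input = pad.getInput();
      else if (auto cast =
                   llvm::dyn_cast_or_null<mlir::TF::CastOp>(input.getDefiningOp()))
        input = cast.getX();
      if (auto arg = mlir::dyn_cast<mlir::BlockArgument>(input))
        return arg.getArgNumber();
      return std::nullopt;
    }
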
  7. tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold_test.cc

      auto mul_op = dyn_cast_or_null<TF::MulOp>(results[0].getDefiningOp());
      EXPECT_THAT(mul_op, NotNull());
      // Even though the preceding CastOp is foldable, it shouldn't be folded since
      // we are calling from the MulOp.
      EXPECT_TRUE(isa<TF::CastOp>(mul_op.getX().getDefiningOp()));
    }
    
    TEST_F(ConstantFoldingTest, NotFoldingArgument) {
      constexpr absl::string_view kModuleCode = R"mlir(
        module {
    - Last Modified: Thu Apr 04 07:19:09 UTC 2024
    - 10.1K bytes
  8. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc

        auto block_shape_i64 =
            rewriter.create<CastOp>(loc, block_shape_i64_type, op.getBlockShape());
    
        auto paddings_i64_type = tensorflow::GetTypeFromTFTensorShape(
            paddings_type.getShape(), rewriter.getIntegerType(64));
        auto paddings_i64 =
            rewriter.create<CastOp>(loc, paddings_i64_type, op.getPaddings());
    
        auto pad00 = rewriter.create<ConstOp>(
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 74.9K bytes
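
    A brief illustration of the cast-to-i64 step shown above, using a plain RankedTensorType instead of the GetTypeFromTFTensorShape helper; the wrapper name is hypothetical:

    #include "mlir/IR/Builders.h"
    #include "mlir/IR/BuiltinTypes.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

    // Sketch: rebuild the operand's tensor type with i64 elements and insert
    // a tf.Cast to that type.
    mlir::Value CastToI64(mlir::OpBuilder& builder, mlir::Location loc,
                          mlir::Value value) {
      auto ranked = mlir::cast<mlir::RankedTensorType>(value.getType());
      auto i64_type = mlir::RankedTensorType::get(ranked.getShape(),
                                                  builder.getIntegerType(64));
      return builder.create<mlir::TF::CastOp>(loc, i64_type, value);
    }
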
  9. tensorflow/compiler/mlir/quantization/stablehlo/passes/convert_xla_call_module_op_to_bfloat16.cc

        builder.setInsertionPoint(op);
        for (auto& op_operand : op->getOpOperands()) {
          if (IsLargeFloatType(op_operand.get().getType())) {
            op_operand.set(builder.create<TF::CastOp>(
                op->getLoc(), ToBfloat16Type(op_operand.get().getType()),
                op_operand.get()));
          }
        }
        builder.setInsertionPointAfter(op);
        for (auto op_result : op->getOpResults()) {
    - Last Modified: Tue Mar 05 08:32:43 UTC 2024
    - 5.6K bytes
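
    The excerpt ends just as the loop over results begins. A hedged sketch of the symmetric step, casting narrowed results back to their original wide float types for existing users; the helper name and signature are assumptions:

    #include "llvm/ADT/ArrayRef.h"
    #include "mlir/IR/Builders.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

    // Sketch: after the op, cast each narrowed result back to its original
    // wide float type and point existing users at the cast, while the cast
    // itself keeps reading the narrow result.
    void CastResultsBack(mlir::OpBuilder& builder, mlir::Operation* op,
                         llvm::ArrayRef<mlir::Type> original_types) {
      builder.setInsertionPointAfter(op);
      for (unsigned i = 0; i < op->getNumResults(); ++i) {
        mlir::OpResult result = op->getResult(i);
        if (result.getType() == original_types[i]) continue;
        mlir::Value cast = builder.create<mlir::TF::CastOp>(
            op->getLoc(), original_types[i], result);
        result.replaceAllUsesExcept(cast, cast.getDefiningOp());
      }
    }
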
  10. tensorflow/compiler/mlir/tensorflow/transforms/lower_variable_ops_to_ml_program.cc

              op.getLoc(), globalOp.getType(),
              SymbolRefAttr::get(op->getContext(), globalOp.getSymName()));
          if (globalOp.getType() != op.getValue().getType()) {
            load = builder.create<TF::CastOp>(op.getLoc(), op.getValue().getType(),
                                              load->getResult(0));
          }
          op.getResult().replaceAllUsesWith(load->getResult(0));
          op.erase();
        });
    
    - Last Modified: Wed Mar 06 23:53:00 UTC 2024
    - 7.6K bytes