- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 15 for DivOp (0.12 sec)
-
tensorflow/compiler/mlir/tensorflow/ir/tf_arith_ops_folder.h
#include "mlir/IR/Value.h" // from @llvm-project #include "mlir/Support/LLVM.h" // from @llvm-project namespace mlir { class Operation; namespace TF { class AddV2Op; class SubOp; class MulOp; class DivOp; class RealDivOp; // Verifies a reduction op's `input` and reduction `dims`. LogicalResult VerifyReductionInputAndDims(Value input, Value dims, Location loc);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/odml_converter/folders.cc
// element attributes. static const APFloat& AddSign(const APFloat& v) { return v; } static APSInt AddSign(const APInt& v) { return APSInt(v); } template <typename ResultType> static LogicalResult FoldDivOpInternal(stablehlo::DivOp op, PatternRewriter& rewriter) { auto adaptor = FoldAdaptor::Create(op); if (!adaptor.has_value()) { return failure(); } auto const_oprs = adaptor.value().OperandData();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 06:11:55 UTC 2024 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc
auto quant_to_float = rewriter.create<DivOp>(op.getLoc(), float_diff, quant_diff); auto float_to_quant = rewriter.create<DivOp>(op.getLoc(), quant_diff, float_diff); // During quantization, the quantized min/max values may not line up // perfectly with the specified min/max. Nudge them into the right range. auto min_scaled = rewriter.create<DivOp>(op.getLoc(), float_min, quant_to_float);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 74.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.cc
}; auto scalar_sub = [&](Value lhs, Value rhs) { return builder.create<TF::SubOp>(loc, int32_scalar_type, lhs, rhs); }; auto scalar_div = [&](Value lhs, Value rhs) { return builder.create<TF::DivOp>(loc, int32_scalar_type, lhs, rhs); }; // effective_filter_size = (filter_size - 1) * dilation_rate + 1 Value stride_value = CreateScalarConstValue<int32_t>(builder, loc, stride); Value dilation_rate_value =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/hardwares/gpu_hardware.cc
} // Op registrations TAC_REGISTER_GPU_OP(AbsOp, CreateBasicOpNoCost); TAC_REGISTER_GPU_OP(AveragePool2DOp, CreateBasicOpNoCost); TAC_REGISTER_GPU_OP(CosOp, CreateBasicOpNoCost); TAC_REGISTER_GPU_OP(DivOp, CreateBasicOpNoCost); TAC_REGISTER_GPU_OP(ExpOp, CreateBasicOpNoCost); TAC_REGISTER_GPU_OP(HardSwishOp, CreateBasicOpNoCost); TAC_REGISTER_GPU_OP(LogOp, CreateBasicOpNoCost);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 06 03:08:33 UTC 2023 - 7.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/push_transpose_through_ewise.cc
// definition of Elementwise, all i/o shapes and types must be same-rank // broadcastable and fully static. Consider moving this into attribute later. bool IsElementwise(Operation *op) { if (!(llvm::isa<TFL::AddOp, TFL::MulOp, TFL::DivOp, TFL::SubOp, TFL::MaximumOp, TFL::MinimumOp>(op))) { return false; } auto opr1_type = llvm::dyn_cast_or_null<RankedTensorType>(op->getOperand(0).getType()); auto opr2_type =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
results.add<DivNoNanOrMulNoNanConstantY<TF::DivNoNanOp, TF::DivOp>>(context); } //===----------------------------------------------------------------------===// // DivOp //===----------------------------------------------------------------------===// void DivOp::getCanonicalizationPatterns(RewritePatternSet& results, MLIRContext* context) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 146.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize.cc
// 2) Is only used by binary op like AddOp, SubOp, MulOp or DivOp. bool HasOneUseOrUsedByOnlyBinaryOps(Value out_value) { if (out_value.hasOneUse()) { return true; } for (auto &use : out_value.getUses()) { mlir::Operation *owner = use.getOwner(); if (!llvm::isa<mlir::TFL::AddOp>(owner) && !llvm::isa<mlir::TFL::SubOp>(owner) && !llvm::isa<mlir::TFL::DivOp>(owner) &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 102.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.cc
TypeID::get<TF::Conv3DBackpropInputV2Op>(), TypeID::get<TF::CumprodOp>(), TypeID::get<TF::CumsumOp>(), TypeID::get<TF::DepthwiseConv2dNativeOp>(), TypeID::get<TF::DivOp>(), TypeID::get<TF::DynamicStitchOp>(), TypeID::get<TF::_EagerConstOp>(), TypeID::get<TF::EmptyOp>(), TypeID::get<TF::ExpandDimsOp>(), TypeID::get<TF::FakeQuantWithMinMaxVarsOp>(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 04:08:35 UTC 2024 - 21.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc
ApplyExplicitBroadcasting<TF::AddV2Op>, ApplyExplicitBroadcasting<TF::MulOp>, ApplyExplicitBroadcasting<TF::DivOp>, ApplyExplicitBroadcasting<TF::RealDivOp>, ApplyExplicitBroadcasting<TF::SubOp>, ApplyExplicitBroadcasting<TF::FloorDivOp>,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 20:06:54 UTC 2024 - 45.2K bytes - Viewed (0)