- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 24 for "broadcastable" (0.26 sec)
-
tensorflow/compiler/mlir/tensorflow/ir/tf_arith_ops_folder.h
// Scalar identity is broadcastable to any operand shape, we only need to // check that operand has the same shape as a result. bool scalar_identity = identity_ty.hasRank() && identity_ty.getRank() == 0; if (scalar_identity) return operand_ty == result_ty; // If identity is not a scalar, we must verify that identity shape is // statically known to be broadcastable to the operand shape and the operand
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.cc
return failure(); } } // Ensure that batch shapes are broadcastable. tensorflow::MatMulBCast bcast( absl::InlinedVector<int64_t, 4>(lhs_shape.begin(), lhs_shape.end()), absl::InlinedVector<int64_t, 4>(rhs_shape.begin(), rhs_shape.end())); if (!bcast.IsValid()) { // Input batch dimensions must be broadcastable return failure(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/arithmetic_count_util.h
#include "mlir/IR/Operation.h" // from @llvm-project #include "mlir/Support/LLVM.h" // from @llvm-project namespace mlir { namespace TFL { // For add/mul/div/sub and other broadcastable ops. class ArithmeticCountUtilHelper { public: static bool GetFirstOutputCount(mlir::Operation* op, int64_t* count) { auto output = op->getResult(0); auto output_type =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 3.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
} // Check if alpha is broadcastable for (int i = 0; i < alpha_type.getRank(); i++) { if (alpha_type.getDimSize(i) != input_type.getDimSize(i + 1) && alpha_type.getDimSize(i) != 1) { return op.emitOpError( llvm::formatv("'alpha' is not broadcastable at dimension {0}.", i)); } } }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 169.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/fold_broadcast.cc
if (shape_x.size() < 2 || shape_y.size() < 2) { return false; } // Checks outer dimensions (i.e., the dimensions higher than 2D) are // broadcastable. If true, then get the broadcasted shape for outer // dimension. if (!OpTrait::util::getBroadcastedShape( shape_x.drop_back(2), shape_y.drop_back(2), result_shape)) { return false;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/validators.cc
}); } bool IsBroadcastableElementsAttrs(mlir::TypedAttr a, mlir::TypedAttr b) { // This would return false if we had unranked tensors (where they should // probably be considered as broadcastable), but given we are working with // attributes here that shouldn't be an issue, return OpTrait::util::getBroadcastedType(a.getType(), b.getType()) != Type(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_tensor_helper.cc
auto result_type = OpTrait::util::getBroadcastedType(x.getType(), y.getType()); if (!result_type) { if (incompatible_shape_error.getValue()) { mlir::emitError(loc, "non-broadcastable operands"); } else { return UnrankedTensorType::get(builder->getI1Type()); } } auto ranked_type = mlir::dyn_cast<RankedTensorType>(result_type);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc
binding_output = op->getResult(0); binding_input = op->getOperand(0); binding_weight = op->getOperand(1); return success(); } // Makes the 1D value broadcastable with the `rhs_shape`. Value MakeOneDimValueBroadcastable(OpBuilder& builder, Location loc, Value value, ShapedType rhs_shape) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/push_transpose_through_ewise.cc
} return new_shape; } // Determine if op commutes with transposes. Requires a strict // definition of Elementwise, all i/o shapes and types must be same-rank // broadcastable and fully static. Consider moving this into attribute later. bool IsElementwise(Operation *op) { if (!(llvm::isa<TFL::AddOp, TFL::MulOp, TFL::DivOp, TFL::SubOp, TFL::MaximumOp, TFL::MinimumOp>(op))) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_executor.cc
tf_type::DropRefAndSubTypes(output_tensor_type)); if (!broadcasted_type) { return switchn.emitOpError() << "expects data operand to be broadcastable with all output types" << " but got " << operand0_tensor_type << " vs " << output_tensor_type; } } return success(); } void SwitchNOp::print(OpAsmPrinter &p) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 42.7K bytes - Viewed (0)