- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 12 for broadcastable (0.22 sec)
-
tensorflow/compiler/mlir/tensorflow/tests/tf_executor_ops_invalid.mlir
// expected-error@-1 {{'tf_executor.Merge' op expects all operands to be broadcastable with output type but got 'tensor<i1>' vs 'tensor<*xf32>'}} tf_executor.fetch %value : tensor<*xf32> } func.return %result : tensor<*xf32> } // ----- // Check that merge data inputs are broadcastable to the output func.func @invalid_merge(%arg0: tensor<*xf32>, %arg1: tensor<4xf32>) -> tensor<8xf32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Oct 19 01:12:10 UTC 2023 - 28.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/canonicalize.td
// Canonicalize: Log(1.0 + x) to Log1p(x) // // We currently do this rewrite only if the constant `1` is a scalar, because // it is safely broadcastable to any shape. To be able to canonicalize when // constant values is not a scalar, we have to first prove that it is // broadcastable to `x`, which requires static shape information. def LogToLog1p : Pat< (TF_LogOp:$src (TF_AddV2Op $arg,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Dec 06 18:42:28 UTC 2023 - 17K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_executor_ops.td
} def TfExecutor_SwitchOp : TfExecutor_Op<"Switch", [ControlOperandsAfterAllData, HasParent<"GraphOp">, PredOpTrait<"data operand must be broadcastable to true result", TF_OpIsBroadcastableToRes<0, 0>>, PredOpTrait<"data operand must be broadcastable to false result", TF_OpIsBroadcastableToRes<0, 1>>]>{ let summary = [{
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 23 19:35:12 UTC 2023 - 22K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.cc
return failure(); } } // Ensure that batch shapes are broadcastable. tensorflow::MatMulBCast bcast( absl::InlinedVector<int64_t, 4>(lhs_shape.begin(), lhs_shape.end()), absl::InlinedVector<int64_t, 4>(rhs_shape.begin(), rhs_shape.end())); if (!bcast.IsValid()) { // Input batch dimensions must be broadcastable return failure(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td
// the core implementation requiring SameOperandsAndResultType. // // This shouldn't be used for side effecting ops. def TF_Involution : NativeOpTrait<"TF::IsInvolution">; // Variant of broadcastable trait that considers TF's subtype behavior. class TF_OpIsBroadcastableToRes<int opId, int resId> : And<[ TCOpResIsShapedTypePred<opId, resId>, CPred<"mlir::tf_type::BroadcastCompatible("
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 30.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-binary-elementwise.mlir
func.return %1: tensor<2xi32> } // CHECK-LABEL: func @broadcast_add // TODO(laurenzo): Change this to a (5 + 2x1) shaped add to make the check // patterns unambiguous and more interesting (once broadcastable trait is // fixed upstream). func.func @broadcast_add(%arg0: tensor<1xi32>, %arg1: tensor<1x2xi32>) -> tensor<1x2xi32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 06 15:32:52 UTC 2024 - 18.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc
binding_output = op->getResult(0); binding_input = op->getOperand(0); binding_weight = op->getOperand(1); return success(); } // Makes the 1D value broadcastable with the `rhs_shape`. Value MakeOneDimValueBroadcastable(OpBuilder& builder, Location loc, Value value, ShapedType rhs_shape) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/push_transpose_through_ewise.cc
} return new_shape; } // Determine if op commutes with transposes. Requires a strict // definition of Elementwise, all i/o shapes and types must be same-rank // broadcastable and fully static. Consider moving this into attribute later. bool IsElementwise(Operation *op) { if (!(llvm::isa<TFL::AddOp, TFL::MulOp, TFL::DivOp, TFL::SubOp, TFL::MaximumOp, TFL::MinimumOp>(op))) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 12.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_executor.cc
tf_type::DropRefAndSubTypes(output_tensor_type)); if (!broadcasted_type) { return switchn.emitOpError() << "expects data operand to be broadcastable with all output types" << " but got " << operand0_tensor_type << " vs " << output_tensor_type; } } return success(); } void SwitchNOp::print(OpAsmPrinter &p) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 42.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td
//===----------------------------------------------------------------------===// // Check that two values can be broadcasted together def AreBroadcastCompatible : Constraint<CPred<"AreBroadcastCompatible($0, $1)">, "types must be broadcastable">; class DirectBinaryPat<Op FromOp, Op ToOp> : Pat<(FromOp AnyTensor:$l, AnyTensor:$r), (ToOp $l, $r, (BinBroadcastDimensions $l, $r))>; foreach fromToBinPair = [[TF_AddV2Op, CHLO_BroadcastAddOp],
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 06 18:46:23 UTC 2024 - 34.8K bytes - Viewed (0)