- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 17 for broadcasts (0.2 sec)
-
tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td
"clEnumValN(MoveTransposeDirection::kEnd, \"end\", \"end of the block\"))"> ]; } def BroadcastFoldPass : Pass<"tf-broadcast-fold", "mlir::func::FuncOp"> { let summary = "Fold explicit broadcasts into the following operations if they " "support implicit broadcasting on their operand."; let constructor = "TF::CreateBroadcastFoldPass()"; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:18:05 UTC 2024 - 99.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize.cc
} // Returns whether the given type `a` is broadcast-compatible with `b`. bool IsBroadcastableElementsAttrAndType(Type a, Type b) { return OpTrait::util::getBroadcastedType(a, b) != Type(); } // Returns whether the resultant type of any broadcastable operation with // operands `a` and `b` matches `expected_output`. Returns false if `a` is not // broadcast-compatible with `b`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 102.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
// It will treat the unknown shape inputs as acceptable inputs for model // compatibility if all known ranks are no bigger than the allowed broadcast // maximum rank. if (max_rank <= max_bcast_rank) { return true; } // Checks if all operands are broadcasted by BroadcastTo ops with the shape // is calculated from the same BroadcastArgs op. In such case, all operands // will have the same shape.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 169.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
// // For given bcasted_shape, s0_shape and s1_shape, the broadcasted dimension is // calculated and push back to its corresponding result, r0 or r1. For example, // for s0_shape [1,4] and s1_shape [4, 4], bcasted_shape is computed to be // [4,4] - this leads to the result of r0 to be [0] as the first dimension of s0 // is broadcasted, and r1 to be <> as no broadcasting is happening for s1.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 146.7K bytes - Viewed (0) -
pkg/scheduler/schedule_one_test.go
broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()}) ctx, cancel := context.WithCancel(context.Background()) defer cancel() informerFactory := informers.NewSharedInformerFactory(client, 0) sched, err := New( ctx, client, informerFactory, nil, profile.NewRecorderFactory(broadcaster), WithProfiles(
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Jun 04 06:20:55 UTC 2024 - 128.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir
// CHECK: %[[SHAPE:.+]] = arith.constant // CHECK{LITERAL}: dense<[3, 2]> : tensor<2xi32> // CHECK: %[[BROADCAST:.+]] = "tfl.broadcast_to"(%[[ARG0]], %[[SHAPE]]) : (tensor<1x2x!quant.uniform<i8:f32, 2.000000e+00:3>>, tensor<2xi32>) -> tensor<3x2x!quant.uniform<i8:f32, 2.000000e+00:3>> // CHECK: return %[[BROADCAST]] // ----- // Tests that a quantized `stablehlo.broadcast_in_dim` is converted to
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 106.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc
// SoftmaxCrossEntropyWithLogitsOp //===----------------------------------------------------------------------===// // Verifies that, // // * Input types are broadcast compatible and the broadcasted type has rank two. // LogicalResult SoftmaxCrossEntropyWithLogitsOp::verify() { SoftmaxCrossEntropyWithLogitsOp op = *this; auto broadcasted_ty =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 170.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
[1 2 3]], shape=(3, 3), dtype=int32) In the above example, the input Tensor with the shape of `[1, 3]` is broadcasted to output Tensor with shape of `[3, 3]`. When doing broadcasted operations such as multiplying a tensor by a scalar, broadcasting (usually) confers some time or space benefit, as the broadcasted tensor is never materialized. However, `broadcast_to` does not carry with it any such benefits.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tf-ops.mlir
func.return %0: tensor<*xi32> } // ----- // Test invalid tf.SelectV2: this is an invalid broadcast for the predicate func.func @testInvalidSelectV2(%arg0: tensor<3xi1>, %arg1: tensor<3x2xf16>, %arg2: tensor<3x2xf16>) -> tensor<3x2xf16> { // expected-error @+1 {{operands don't have broadcast-compatible shapes}} %0 = "tf.SelectV2"(%arg0, %arg1, %arg2) : (tensor<3xi1>, tensor<3x2xf16>, tensor<3x2xf16>) -> tensor<3x2xf16>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 23 14:40:35 UTC 2023 - 236.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/ops.mlir
} // ----- func.func @testPReluWrongOutputRank(%arg0: tensor<10x10x10x10xf32>, %arg1: tensor<10x10x10x10xf32>) -> tensor<10x10xf32> { // expected-error @+1 {{'tfl.prelu' op result type '10x10' not broadcast compatible with broadcasted operands's shapes '10x10x10x10'}} %0 = "tfl.prelu"(%arg0, %arg1) : (tensor<10x10x10x10xf32>, tensor<10x10x10x10xf32>) -> tensor<10x10xf32> func.return %0 : tensor<10x10xf32> } // -----
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 189.2K bytes - Viewed (0)