- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 11 for broadcast_or (0.24 sec)
-
tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir
// CHECK: %[[BROADCAST_TO:.*]] = "tfl.broadcast_to"(%arg0, %[[BROADCAST_ARGS]]) : (tensor<8x7x6x5x?x3x2x1xf32>, tensor<8xi64>) -> tensor<8x7x6x5x?x3x2x1xf32> // CHECK: %[[BROADCAST_TO_1:.*]] = "tfl.broadcast_to"(%arg1, %[[BROADCAST_ARGS]]) : (tensor<?x3x2x1xf32>, tensor<8xi64>) -> tensor<8x7x6x5x?x3x2x1xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 05 01:54:33 UTC 2024 - 153.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize.cc
// [1 x 1 x 1 x m*n] by the outermost reshape_op. // problem: The inefficiency here is that the innermost reshape_op and the // broadcast_op are introducing unnecessary leading and trailing 1s'. // fix: Remove the unnecessary 1s' in the inner reshape_op and broadcast_op. struct SqueezeReshapesAroundBroadcastOp : public OpRewritePattern<TFL::BroadcastToOp> { using OpRewritePattern<TFL::BroadcastToOp>::OpRewritePattern;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 102.3K bytes - Viewed (0) -
pkg/scheduler/schedule_one_test.go
broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()}) ctx, cancel := context.WithCancel(context.Background()) defer cancel() informerFactory := informers.NewSharedInformerFactory(client, 0) sched, err := New( ctx, client, informerFactory, nil, profile.NewRecorderFactory(broadcaster), WithProfiles(
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Jun 04 06:20:55 UTC 2024 - 128.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir
// CHECK: %[[BROADCAST:.+]] = "tfl.broadcast_to"(%[[ARG0]], %[[SHAPE]]) : (tensor<1x2x!quant.uniform<i8:f32, 2.000000e+00:3>>, tensor<2xi32>) -> tensor<3x2x!quant.uniform<i8:f32, 2.000000e+00:3>> // CHECK: return %[[BROADCAST]] // ----- // Tests that a quantized `stablehlo.broadcast_in_dim` is converted to // `tfl.transpose` and `tfl.broadcast_to` when `broadcast_dimensions` is not in // ascending order.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 106.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/ops.mlir
%4 = "tfl.broadcast_args"(%2, %3) : (tensor<8xi64>, tensor<8xi64>) -> tensor<8xi64> %5 = "tfl.broadcast_to"(%arg0, %4) : (tensor<8x7x6x5x?x3x2x1xi1>, tensor<8xi64>) -> tensor<8x7x6x5x?x3x2x1xi1> %6 = "tfl.broadcast_to"(%arg1, %4) : (tensor<8x7x6x5x?x3x2x1xf32>, tensor<8xi64>) -> tensor<8x7x6x5x?x3x2x1xf32> %7 = "tfl.broadcast_to"(%arg2, %4) : (tensor<?x3x2x1xf32>, tensor<8xi64>) -> tensor<8x7x6x5x?x3x2x1xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 189.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc
TFL::QConstOp rhs_qconst_op; auto GetBroadcastedConstOp = [&](Value operand) -> TFL::QConstOp { if (auto broadcast_op = dyn_cast_or_null<stablehlo::BroadcastInDimOp>( operand.getDefiningOp())) { auto stablehlo_const_op = dyn_cast_or_null<stablehlo::ConstantOp>( broadcast_op.getOperand().getDefiningOp()); auto const_uniform_quantized_type =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 09:00:19 UTC 2024 - 99.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
by a scalar, broadcasting (usually) confers some time or space benefit, as the broadcasted tensor is never materialized. However, `broadcast_to` does not carry with it any such benefits. The newly-created tensor takes the full memory of the broadcasted shape. (In a graph context, `broadcast_to` might be fused to subsequent operation and then be optimized away, however.) }]; let arguments = (ins
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
!llvm::dyn_cast_or_null<TFL::BroadcastToOp>(defining_op)) { return nullptr; } Value broadcast_shape = defining_op->getOperand( 1); // Broadcasted shape operand of BroadcastTo op. Operation* parent_of_defining_op = broadcast_shape.getDefiningOp(); if (!llvm::dyn_cast_or_null<TF::BroadcastArgsOp>(parent_of_defining_op) && !llvm::dyn_cast_or_null<TFL::BroadcastArgsOp>(parent_of_defining_op)) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 169.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td
%emptyf = "tf.Cast"(%emptyi) : (tensor<i32>) -> tensor<f32> %size_shape = "tf.Const"() {value = dense<[10, 8, 4]> : tensor<3xi32>} : () -> tensor<3xi32> %tl = "tf.BroadcastTo"(%emptyf, %size_shape) : (tensor<f32>, tensor<3xi32>) -> tensor<10x8x4xf32> // TensorListPushBack lowering %index_in_list = "tf.Const"() {value = dense<0> : tensor<1xi32>} : () -> tensor<1xi32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:18:05 UTC 2024 - 99.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tf-ops.mlir
func.return %r0, %r1 : tensor<0xi32>, tensor<3xi32> } // ----- // Test valid tf.BroadcastTo // CHECK-LABEL: func @testBroadcastTo(%arg0: tensor<16xf32>) func.func @testBroadcastTo(%arg0: tensor<16xf32>) -> tensor<16x16x16x16xf32> { %cst = arith.constant dense<16> : tensor<4xi32> %0 = "tf.BroadcastTo"(%arg0, %cst) : (tensor<16xf32>, tensor<4xi32>) -> tensor<16x16x16x16xf32> func.return %0 : tensor<16x16x16x16xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 23 14:40:35 UTC 2023 - 236.4K bytes - Viewed (0)