- Sort Score
- Results per page: 10
- Languages All
Results 81 - 90 of 101 for broadcast_or (0.89 sec)
-
tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
pass_manager.addPass(mlir::odml::CreateLegalizeHloToTfLitePass()); } // TF dialect passes pass_manager.addPass(mlir::odml::CreateLegalizeHloToTfPass()); // folds tf.BroadcastTo ops with subsequent ops if they have built in // broadcasting support. This needs to be run immediately after HLO->TF // legalization; otherwise other passes like `ConvertTFBroadcastTo` will
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 25.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/lower_tf.mlir
// CHECK: "tf.BroadcastTo"(%[[ARG1]], %[[ARG0]]) %0 = "tf.Fill"(%arg0, %arg1) : (tensor<*xi64>, tensor<*xf32>) -> tensor<*xf32> func.return %0 : tensor<*xf32> } func.func @empty(%arg0: tensor<?xi32>) -> tensor<*xf32> { // CHECK-DAG: [[CST:%.+]] = "tf.Const"() <{value = dense<0.000000e+00> : tensor<f32>}> // CHECK-DAG: [[RES:%.+]] = "tf.BroadcastTo"([[CST]], %arg0)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 05 18:35:42 UTC 2024 - 92K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/fuse-tftext.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 460.3K bytes - Viewed (0) -
tensorflow/cc/gradients/array_grad_test.cc
} TEST_F(ArrayGradTest, BroadcastToGrad) { TensorShape x_shape({2, 5}); auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape)); TensorShape y_shape({3, 2, 5}); auto y = BroadcastTo(scope_, x, Const(scope_, {3, 2, 5})); RunTest(x, x_shape, y, y_shape); } TEST_F(ArrayGradTest, TileGrad) { TensorShape x_shape({2, 5}); auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 10 23:33:32 UTC 2023 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tensor_array_ops_decomposition.mlir
// Test read and write on a tensor list. // CHECK-LABEL: func @main func.func @main() -> tensor<3xf32> { %size = "tf.Const"() {value = dense<5> : tensor<i32>} : () -> tensor<i32> // CHECK: %[[BUFFER:.*]] = "tf.BroadcastTo" // CHECK-SAME: -> tensor<5x3xf32> // CHECK: %[[VAR:.*]] = "tf.MlirLocalVarOp"() : () -> tensor<!tf_type.resource<tensor<5x3xf32>>> // CHECK: "tf.AssignVariableOp"(%[[VAR]], %[[BUFFER]])
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 49K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs
SCATTER_ND = 122, SELECT_V2 = 123, DENSIFY = 124, SEGMENT_SUM = 125, BATCH_MATMUL = 126, PLACEHOLDER_FOR_GREATER_OP_CODES = 127, CUMSUM = 128, CALL_ONCE = 129, BROADCAST_TO = 130, RFFT2D = 131, CONV_3D = 132, IMAG=133, REAL=134, COMPLEX_ABS=135, HASHTABLE = 136, HASHTABLE_FIND = 137, HASHTABLE_IMPORT = 138, HASHTABLE_SIZE = 139,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 14:28:27 UTC 2024 - 30K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/passes.h
// Create a pass that convert ops that copy tensors between devices, e.g. // tf.Identity. std::unique_ptr<OperationPass<mlir::func::FuncOp>> CreateTensorDeviceCopyConversionPass(); // Returns a pass that folds tf.BroadcastTo nodes with subsequent nodes if they // have built in broadcasting support. std::unique_ptr<OperationPass<func::FuncOp>> CreateBroadcastFoldPass(); void populateTfControlFlowToScfPatterns(MLIRContext* context,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:18:05 UTC 2024 - 31.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/schema/schema.fbs
SCATTER_ND = 122, SELECT_V2 = 123, DENSIFY = 124, SEGMENT_SUM = 125, BATCH_MATMUL = 126, PLACEHOLDER_FOR_GREATER_OP_CODES = 127, CUMSUM = 128, CALL_ONCE = 129, BROADCAST_TO = 130, RFFT2D = 131, CONV_3D = 132, IMAG=133, REAL=134, COMPLEX_ABS=135, HASHTABLE = 136, HASHTABLE_FIND = 137, HASHTABLE_IMPORT = 138, HASHTABLE_SIZE = 139,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
by a scalar, broadcasting (usually) confers some time or space benefit, as the broadcasted tensor is never materialized. However, `broadcast_to` does not carry with it any such benefits. The newly-created tensor takes the full memory of the broadcasted shape. (In a graph context, `broadcast_to` might be fused to subsequent operation and then be optimized away, however.) }]; let arguments = (ins
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
!llvm::dyn_cast_or_null<TFL::BroadcastToOp>(defining_op)) { return nullptr; } Value broadcast_shape = defining_op->getOperand( 1); // Broadcasted shape operand of BroadcastTo op. Operation* parent_of_defining_op = broadcast_shape.getDefiningOp(); if (!llvm::dyn_cast_or_null<TF::BroadcastArgsOp>(parent_of_defining_op) && !llvm::dyn_cast_or_null<TFL::BroadcastArgsOp>(parent_of_defining_op)) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 169.2K bytes - Viewed (0)