- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 23 for AddN (0.09 sec)
-
tensorflow/compiler/mlir/tensorflow/tests/tensor_list_ops_decomposition.mlir
// CHECK-NEXT: %[[ADDN:.*]] = "tf.AddN"(%[[UPDATE]], %[[BROADCAST]]) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32> %addn = "tf.AddN"(%set, %tl) : (tensor<!tf_type.variant<tensor<f32>>>, tensor<!tf_type.variant<tensor<f32>>>) -> tensor<!tf_type.variant<tensor<f32>>> // CHECK-NEXT: %[[ZEROS_LIKE:.*]] = "tf.ZerosLike"(%[[ADDN]]) : (tensor<10xf32>) -> tensor<10xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 38.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tensor_list_ops_decomposition.cc
return failure(); } } else if (auto addn = llvm::dyn_cast<TF::AddNOp>(&op)) { auto it = buffer_to_size->find(addn.getOperand(0)); if (it != buffer_to_size->end()) { addn.getSum().setType( mlir::cast<TensorType>(addn.getOperand(0).getType())); auto size = it->getSecond(); (*buffer_to_size)[addn.getSum()] = size; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 39.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_tensorlist.cc
}); return has_variant_input || has_variant_output; } // There are 2 standard tf ops which are not TensorList ops that may take as // input a tensorlist. These are tf.AddN and tf.ZerosLike. Since the runtime // implementation of a tensorlist are not compatible between tf and tflite // we cannot use tflite tensorlist kernels until these cases are handled.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 07 23:04:40 UTC 2024 - 10.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir
// CHECK: [[ADD_N:%.*]] = "tf.AddN"(%arg0, [[ZERO]], [[ONE]]) // CHECK: return %arg0, %arg0, [[ZERO]], [[ADD_N]] %2 = "tf.AddN"(%arg0, %1, %1) : (tensor<2xf32>, tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32> %3 = "tf.AddN"(%1, %arg0, %1) : (tensor<2xf32>, tensor<2xf32> , tensor<2xf32>) -> tensor<2xf32> %4 = "tf.AddN"(%1, %1) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 132.1K bytes - Viewed (0) -
tensorflow/c/kernels_experimental.h
TF_CAPI_EXPORT extern bool TF_IsRefInput(TF_OpKernelContext* ctx, int i, TF_Status* status); #ifndef IS_MOBILE_PLATFORM // Expose higher level AddN operation for Pluggable vendors to implement // in the plugin for Variant data types. The API takes in the context and a // callback provided by pluggable vendor to do a Binary Add operation on the
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Aug 07 14:44:39 UTC 2023 - 9.4K bytes - Viewed (0) -
tensorflow/compiler/jit/shape_inference_test.cc
auto c = ops::Placeholder(root.WithOpName("C"), DT_FLOAT); auto d = ops::Add(root.WithOpName("D"), a, b); auto e = ops::Add(root.WithOpName("E"), d, c); auto f = ops::Neg(root.WithOpName("F"), e); auto g = ops::AddN(root.WithOpName("G"), std::initializer_list<Output>{e, f}); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_CHECK_OK(root.ToGraph(graph.get())); GraphShapeInfo shape_info;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 00:41:19 UTC 2024 - 10.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc
return rewriter.create<ConcatV2Op>(loc, type, ValueRange(vals), axis); } // Lowers AddN op to a sequence of AddV2 ops to accumulate operands. // // Note that to improve the parallelism, AddN op uses tree-based reduction. // For example, tf.AddN([0, 1, 2, 3, 4]) behaves as follows: // // 0 1 2 3 4 // | | | | |
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 74.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/mlrt/tf_to_mlrt.mlir
// CHECK: [[d:%.*]] = tf_mlrt.await [[futures]]#2 %d = "tf_mlrt.tf_await"(%future_d) : (!mlrt.future) ->tensor<i32> // CHECK: [[result:%.*]] = tf_mlrt.executeop([[b]], [[d]], [[f]]) // CHECK-SAME: AddN %result = "tf.AddN"(%b, %d, %f) {__op_key = 9: i32}: (tensor<i32>, tensor<i32>, tensor<i32>) -> tensor<i32> // CHECK: mlrt.await_handle [[handle_0]] // CHECK: mlrt.await_handle [[handle_1]] mlrt.await_handle %handle_0
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 20:44:15 UTC 2024 - 24.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/legalize-tensorlist.mlir
} // ----- // CHECK-LABEL: variantAddN func.func @variantAddN(%arg0: tensor<!tf_type.variant<tensor<*xi32>>>, %arg1: tensor<!tf_type.variant<tensor<*xi32>>>) -> tensor<!tf_type.variant<tensor<*xi32>>> { %1 = "tf.AddN"(%arg0, %arg1) : (tensor<!tf_type.variant<tensor<*xi32>>>, tensor<!tf_type.variant<tensor<*xi32>>>) -> tensor<!tf_type.variant<tensor<*xi32>>>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 9.5K bytes - Viewed (0) -
tensorflow/cc/gradients/math_grad_test.cc
RunTest({x}, {x_shape}, {y}, {y_shape}); } TEST_F(NaryGradTest, AddN) { TensorShape shape({3, 2, 5}); std::vector<Output> xs; xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape))); xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape))); xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape))); auto y = AddN(scope_, xs); RunTest(xs, {shape, shape, shape}, {y}, {shape}); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Aug 25 18:20:20 UTC 2023 - 36K bytes - Viewed (0)