- Sort by: Score
- Results per page: 10
- Languages All
Results 1 - 7 of 7 for AddN (0.5 sec)
-
tensorflow/compiler/mlir/tensorflow/transforms/tensor_list_ops_decomposition.cc
return failure(); } } else if (auto addn = llvm::dyn_cast<TF::AddNOp>(&op)) { auto it = buffer_to_size->find(addn.getOperand(0)); if (it != buffer_to_size->end()) { addn.getSum().setType( mlir::cast<TensorType>(addn.getOperand(0).getType())); auto size = it->getSecond(); (*buffer_to_size)[addn.getSum()] = size; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 39.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_tensorlist.cc
}); return has_variant_input || has_variant_output; } // There are 2 standard tf ops which are not TensorList ops that may take as // input a tensorlist. These are tf.AddN and tf.ZerosLike. Since the runtime // implementation of a tensorlist are not compatible between tf and tflite // we cannot use tflite tensorlist kernels until these cases are handled.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 07 23:04:40 UTC 2024 - 10.6K bytes - Viewed (0) -
tensorflow/compiler/jit/shape_inference_test.cc
auto c = ops::Placeholder(root.WithOpName("C"), DT_FLOAT); auto d = ops::Add(root.WithOpName("D"), a, b); auto e = ops::Add(root.WithOpName("E"), d, c); auto f = ops::Neg(root.WithOpName("F"), e); auto g = ops::AddN(root.WithOpName("G"), std::initializer_list<Output>{e, f}); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_CHECK_OK(root.ToGraph(graph.get())); GraphShapeInfo shape_info;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 00:41:19 UTC 2024 - 10.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/mlrt/tf_to_mlrt.mlir
// CHECK: [[d:%.*]] = tf_mlrt.await [[futures]]#2 %d = "tf_mlrt.tf_await"(%future_d) : (!mlrt.future) ->tensor<i32> // CHECK: [[result:%.*]] = tf_mlrt.executeop([[b]], [[d]], [[f]]) // CHECK-SAME: AddN %result = "tf.AddN"(%b, %d, %f) {__op_key = 9: i32}: (tensor<i32>, tensor<i32>, tensor<i32>) -> tensor<i32> // CHECK: mlrt.await_handle [[handle_0]] // CHECK: mlrt.await_handle [[handle_1]] mlrt.await_handle %handle_0
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 20:44:15 UTC 2024 - 24.7K bytes - Viewed (0) -
tensorflow/c/eager/gradients.cc
if (gradient_tensors.size() == 1) { return gradient_tensors[0]; } AbstractOperationPtr op(ctx_->CreateOperation()); Status s = op->Reset("AddN", /*raw_device_name=*/nullptr); if (!s.ok()) { return nullptr; } s = op->AddInputList(gradient_tensors); if (!s.ok()) { return nullptr; } int num_outputs = 1;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 15 09:49:45 UTC 2024 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-with-tf2xla-hlo-importer.mlir
} // CHECK-LABEL: unsupported_dtype func.func @unsupported_dtype(%arg0: tensor<2x!tf_type.variant>) -> tensor<2x!tf_type.variant> { // CHECK: tf.AddN // expected-remark@+1 {{skipping legalization due to unsupported type 'tensor<2x!tf_type.variant>'}} %0 = "tf.AddN"(%arg0, %arg0) : (tensor<2x!tf_type.variant>, tensor<2x!tf_type.variant>) -> tensor<2x!tf_type.variant> func.return %0 : tensor<2x!tf_type.variant> }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 06 15:32:52 UTC 2024 - 38.6K bytes - Viewed (0) -
tensorflow/cc/framework/gradients.cc
// Just one backprop edge. *grad = grads_to_keep[0]; } else { // Otherwise, adds backprop-ed gradients. // TODO(andydavis) Use a better accumulator here. *grad = ops::AddN(scope_, grads_to_keep); } return absl::OkStatus(); } bool SymbolicGradientBuilder::IsPrimitiveOpWithNoGrad(const string& opname) { ops::GradFunc grad_fn;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 05:57:22 UTC 2024 - 22K bytes - Viewed (0)