- Sort Score
- Result 10 results
- Languages All
Results 31 - 40 of 57 for num_inputs (0.16 sec)
-
tensorflow/c/c_api.cc
} void TF_AddInputList(TF_OperationDescription* desc, const TF_Output* inputs, int num_inputs) { std::vector<NodeBuilder::NodeOut> input_list; input_list.reserve(num_inputs); for (int i = 0; i < num_inputs; ++i) { input_list.emplace_back(&inputs[i].oper->node, inputs[i].index); } desc->node_builder.Input(input_list); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 15 03:35:10 UTC 2024 - 102.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/lower_static_tensor_list.cc
FunctionType func_type = func.getFunctionType(); int num_inputs = func_type.getNumInputs(); int num_results = func_type.getNumResults(); // For each argument type in function's arguments, change it to unranked // tensor type if it's a variant type. SmallVector<Type, 8> updated_argument_types; updated_argument_types.reserve(num_inputs); UpdateTensorListTypes<mlir::OperandRange>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 20:00:43 UTC 2024 - 70.7K bytes - Viewed (0) -
tensorflow/c/eager/c_api.cc
} void TFE_OpAddInputList(TFE_Op* op, TFE_TensorHandle** inputs, int num_inputs, TF_Status* status) { status->status = tensorflow::unwrap(op)->AddInputList( {reinterpret_cast<tensorflow::AbstractTensorHandle**>( tensorflow::unwrap(inputs)), static_cast<size_t>(num_inputs)}); } extern int TFE_OpGetFlatInputCount(const TFE_Op* op, TF_Status* status) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 08:11:23 UTC 2024 - 44K bytes - Viewed (0) -
tensorflow/compiler/jit/device_compiler.h
ExecutableType** out_executable) { DCHECK_NE(out_executable, nullptr); VLOG(2) << "DeviceCompiler::Compile " << DebugString(); if (VLOG_IS_ON(2)) { VLOG(2) << "num_inputs=" << args.size(); for (int i = 0, end = args.size(); i < end; i++) { VLOG(3) << i << ": " << args[i].HumanString(); } } TF_ASSIGN_OR_RETURN(auto signature,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 22.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.cc
// TODO(ycao): Support computation with compile-time constant, which requires // non-trivial input mapping as implemented now. void GetInputMappingForMlir(int num_inputs, std::vector<int>* input_mapping) { input_mapping->resize(num_inputs, 0); std::iota(input_mapping->begin(), input_mapping->end(), 0); } static void RegisterDialects(mlir::DialectRegistry& registry) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 17:24:39 UTC 2024 - 45.3K bytes - Viewed (0) -
tensorflow/cc/gradients/linalg_grad.cc
return errors::InvalidArgument("Equation must contain a single ->"); } const absl::string_view input_subs = equation_split[0]; const absl::string_view output_subs = equation_split[1]; if (op.num_inputs() == 1) { // For the unary einsum z = einsum("{eq_x}->{eq_z}", x), the gradient wrt // the input (VJP) is given by the reversed equation: // grad_wrt_x = einsum("{eq_z}->{eq_x}", grad_wrt_z)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 07 23:11:54 UTC 2022 - 20.4K bytes - Viewed (0) -
tensorflow/c/eager/c_api.h
TF_CAPI_EXPORT extern void TFE_OpAddInputList(TFE_Op* op, TFE_TensorHandle** inputs, int num_inputs, TF_Status* status); // Fetches the current number of inputs attached to `op`. // // Does not use the operation's definition to determine how many inputs should
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 27 21:07:00 UTC 2023 - 22.8K bytes - Viewed (0) -
tensorflow/c/kernels_experimental.cc
} } bool TF_IsRefInput(TF_OpKernelContext* ctx, int i, TF_Status* status) { auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx); if (i < 0 || i >= cc_ctx->num_inputs()) { TF_SetStatus(status, TF_OUT_OF_RANGE, "input index out of range"); return false; } TF_SetStatus(status, TF_OK, ""); return cc_ctx->input_is_ref(i); } #ifndef IS_MOBILE_PLATFORM
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 23 06:12:29 UTC 2024 - 30.9K bytes - Viewed (0) -
tensorflow/compiler/jit/extract_outside_compilation_pass_test.cc
send_recv_nodes.push_back(n); } } EXPECT_EQ(num_send_from_host, 1); EXPECT_EQ(num_recv_at_host, 1); for (Node *n : send_recv_nodes) { Node *input_node; TF_CHECK_OK(n->input_node(n->num_inputs() - 1, &input_node)); EXPECT_EQ(input_node, key_placeholder); bool has_control_edge_to_sequencer = false; for (const Edge *e : n->out_edges()) { if (e->IsControlEdge() && e->dst() == sequencer) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 41K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.cc
: OpKernel(ctx), platform_info_(XlaPlatformInfoFromDevice(ctx->device())) {} void XlaRunOp::Compute(OpKernelContext* ctx) { VLOG(3) << "XlaRunOp " << def().name(); Tensor key_tensor = ctx->input(ctx->num_inputs() - 1); bool use_pjrt = GetXlaOpsCommonFlags() ->tf_xla_use_device_api.IsEnabledInXlaCompileAndRunForDevice( platform_info_.device_type()); if (use_pjrt) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0)