- Sort Score
- Result 10 results
- Languages All
Results 11 - 20 of 42 for num_inputs (0.24 sec)
-
tensorflow/c/kernels_test.cc
.Attr("SomeDataTypeAttr: type"); static int num_inputs = 0; static int num_outputs = 0; // A kernel whose Compute function has a side-effect of updating num_inputs // and num_outputs. Various functions on TF_OpKernelContext are also // exercised. auto my_compute_func = [](void* kernel, TF_OpKernelContext* ctx) { num_inputs = TF_NumInputs(ctx); num_outputs = TF_NumOutputs(ctx);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 50.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op.cc
absl::AbortedError( "The `ids` and `calibration_methods` must have the same size.")); // Check the number and type of inputs. OP_REQUIRES(context, context->num_inputs() == ids_.size() * 3, absl::AbortedError("The number of inputs must be three times " "the size of the `ids` list.")); for (int i = 0; i < ids_.size(); ++i) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 13 01:31:23 UTC 2024 - 8K bytes - Viewed (0) -
tensorflow/compiler/jit/variable_info_util.cc
} return absl::OkStatus(); } std::vector<int> GetResourceVariableIndicesFromContext(OpKernelContext* ctx) { std::vector<int> out; for (int64 i = 0; i < ctx->num_inputs(); i++) { if (ctx->input(i).dtype() == DT_RESOURCE) { out.push_back(i); } } return out; } Status CreateVariableInfoLookup( absl::Span<VariableInfo const> variable_args,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 7K bytes - Viewed (0) -
tensorflow/compiler/jit/build_xla_ops_pass.cc
if (num_constant_inputs < 0 || num_resource_inputs < 0 || num_constant_inputs + num_resource_inputs > n->num_inputs()) { return errors::InvalidArgument( "Invalid number of constant/resource arguments to XLA kernel."); } int num_non_constant_inputs = n->num_inputs() - num_constant_inputs - num_resource_inputs; std::vector<const Edge*> input_edges_vector;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/tpu_cluster_formation.cc
auto input = pos_and_input.value(); bool is_packed = input.getIsPacked(); const int num_operands = input->getNumOperands(); int num_inputs = is_packed ? 1 : num_replicas; if (num_operands != num_inputs) return input->emitOpError() << "requires " << num_inputs << " operands"; if (is_packed) { packed_inputs.push_back(input->getOperand(0)); packed_ops.push_back(input); } else {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 22:03:30 UTC 2024 - 39.3K bytes - Viewed (0) -
tensorflow/compiler/jit/extract_outside_compilation_pass.cc
// if they are all fully defined; std::nullopt otherwise. std::optional<std::vector<PartialTensorShape>> GetInferredInputShapes( int num_inputs, Node* send_from_host_node) { std::vector<PartialTensorShape> results(num_inputs); for (int i = 0; i < num_inputs; i++) { const Edge* e; if (!send_from_host_node->input_edge(i, &e).ok()) { return std::nullopt; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 104.7K bytes - Viewed (0) -
tensorflow/compiler/jit/shape_inference.cc
switch_input == n; if (is_loop_invariant) { shape_inference::InferenceContext* context = shape_refiner->GetContext(n); for (int i = 0; i < n->num_inputs(); i++) { const Node* input_node; if (n->input_node(i, &input_node).ok()) { auto shapes_and_types = context->input_handle_shapes_and_types(i); if (shapes_and_types) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 00:41:19 UTC 2024 - 13K bytes - Viewed (0) -
tensorflow/c/c_api.cc
} void TF_AddInputList(TF_OperationDescription* desc, const TF_Output* inputs, int num_inputs) { std::vector<NodeBuilder::NodeOut> input_list; input_list.reserve(num_inputs); for (int i = 0; i < num_inputs; ++i) { input_list.emplace_back(&inputs[i].oper->node, inputs[i].index); } desc->node_builder.Input(input_list); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 15 03:35:10 UTC 2024 - 102.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/lower_static_tensor_list.cc
FunctionType func_type = func.getFunctionType(); int num_inputs = func_type.getNumInputs(); int num_results = func_type.getNumResults(); // For each argument type in function's arguments, change it to unranked // tensor type if it's a variant type. SmallVector<Type, 8> updated_argument_types; updated_argument_types.reserve(num_inputs); UpdateTensorListTypes<mlir::OperandRange>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 20:00:43 UTC 2024 - 70.7K bytes - Viewed (0) -
tensorflow/c/eager/c_api.cc
} void TFE_OpAddInputList(TFE_Op* op, TFE_TensorHandle** inputs, int num_inputs, TF_Status* status) { status->status = tensorflow::unwrap(op)->AddInputList( {reinterpret_cast<tensorflow::AbstractTensorHandle**>( tensorflow::unwrap(inputs)), static_cast<size_t>(num_inputs)}); } extern int TFE_OpGetFlatInputCount(const TFE_Op* op, TF_Status* status) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 08:11:23 UTC 2024 - 44K bytes - Viewed (0)