- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 16 for num_inputs (0.17 sec)
-
tensorflow/c/c_api_experimental.cc
using tensorflow::shape_inference::ShapeHandle; const int num_inputs = input_shapes->num_items; NodeDef node_def; tensorflow::ImmediateExecutionOperation* op = tensorflow::unwrap(tfe_op); node_def.set_name(op->Name()); node_def.set_op(op->Name()); for (int i = 0; i < num_inputs; ++i) { node_def.add_input("dummy_input"); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 15 03:35:10 UTC 2024 - 29.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_variable_runtime_reformatting.cc
if (data_type.getIntOrFloatBitWidth() == 64) continue; const auto& block_arg = replicate.GetBody().getArgument(replicate_arg); int64_t num_inputs = 0; if (replicate.IsReplicatedBlockArgument(block_arg)) { num_inputs = num_replicas; } else { num_inputs = 1; } // We have found a mirrored variable which is an input to the replicated
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 21.9K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.cc
return variable_lookup; } } // anonymous namespace std::vector<const Tensor*> InputsFromContext(OpKernelContext* ctx) { std::vector<const Tensor*> inputs; inputs.reserve(ctx->num_inputs()); for (int input_idx = 0; input_idx < ctx->num_inputs(); input_idx++) { inputs.push_back(&ctx->input(input_idx)); } return inputs; } absl::StatusOr<std::vector<int>> GetConstantInputIndicesFromContext(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes - Viewed (0) -
tensorflow/cc/framework/ops.h
/// @addtogroup core /// @{ /// Represents a node in the computation graph. class Operation { public: Operation() : node_(nullptr) {} explicit Operation(Node* n); int32 num_inputs() const { return node_->num_inputs(); } DataType input_type(int32_t o) const { return node_->input_type(o); } Output input(int32_t i) const; int32 num_outputs() const { return node_->num_outputs(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 05:57:22 UTC 2024 - 10.5K bytes - Viewed (0) -
tensorflow/c/kernels.cc
} #endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) } int TF_NumInputs(TF_OpKernelContext* ctx) { auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx); return cc_ctx->num_inputs(); } int TF_NumOutputs(TF_OpKernelContext* ctx) { auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx); return cc_ctx->num_outputs(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 22:53:47 UTC 2024 - 36K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/tpu_cluster_formation.cc
auto input = pos_and_input.value(); bool is_packed = input.getIsPacked(); const int num_operands = input->getNumOperands(); int num_inputs = is_packed ? 1 : num_replicas; if (num_operands != num_inputs) return input->emitOpError() << "requires " << num_inputs << " operands"; if (is_packed) { packed_inputs.push_back(input->getOperand(0)); packed_ops.push_back(input); } else {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 22:03:30 UTC 2024 - 39.3K bytes - Viewed (0) -
tensorflow/compiler/jit/shape_inference.cc
switch_input == n; if (is_loop_invariant) { shape_inference::InferenceContext* context = shape_refiner->GetContext(n); for (int i = 0; i < n->num_inputs(); i++) { const Node* input_node; if (n->input_node(i, &input_node).ok()) { auto shapes_and_types = context->input_handle_shapes_and_types(i); if (shapes_and_types) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 00:41:19 UTC 2024 - 13K bytes - Viewed (0) -
tensorflow/c/eager/c_api.cc
} void TFE_OpAddInputList(TFE_Op* op, TFE_TensorHandle** inputs, int num_inputs, TF_Status* status) { status->status = tensorflow::unwrap(op)->AddInputList( {reinterpret_cast<tensorflow::AbstractTensorHandle**>( tensorflow::unwrap(inputs)), static_cast<size_t>(num_inputs)}); } extern int TFE_OpGetFlatInputCount(const TFE_Op* op, TF_Status* status) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 08:11:23 UTC 2024 - 44K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.cc
// TODO(ycao): Support computation with compile-time constant, which requires // non-trivial input mapping as implemented now. void GetInputMappingForMlir(int num_inputs, std::vector<int>* input_mapping) { input_mapping->resize(num_inputs, 0); std::iota(input_mapping->begin(), input_mapping->end(), 0); } static void RegisterDialects(mlir::DialectRegistry& registry) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 17:24:39 UTC 2024 - 45.3K bytes - Viewed (0) -
tensorflow/c/kernels_experimental.cc
} } bool TF_IsRefInput(TF_OpKernelContext* ctx, int i, TF_Status* status) { auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx); if (i < 0 || i >= cc_ctx->num_inputs()) { TF_SetStatus(status, TF_OUT_OF_RANGE, "input index out of range"); return false; } TF_SetStatus(status, TF_OK, ""); return cc_ctx->input_is_ref(i); } #ifndef IS_MOBILE_PLATFORM
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 23 06:12:29 UTC 2024 - 30.9K bytes - Viewed (0)