Results 11 - 20 of 57 for num_inputs (0.45 sec)

  1. tensorflow/c/experimental/saved_model/core/tf_concrete_function_loading_test.cc

          : TensorHandleConvertible(
                testing::CreateTensorHandle(ctx, DT_FLOAT, {2, 4}, value)) {}
    };
    
    FunctionDef FuncDefWithNumInputsOutputs(int num_inputs, int num_outputs) {
      FunctionDef func;
      OpDef* signature = func.mutable_signature();
      for (int i = 0; i < num_inputs; ++i) {
        signature->add_input_arg();
      }
      for (int i = 0; i < num_outputs; ++i) {
        signature->add_output_arg();
      }
      return func;
    }
    
    - Last Modified: Fri Apr 14 19:16:58 UTC 2023
    - 10.6K bytes
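
    A minimal usage sketch (not from the test file), assuming the FuncDefWithNumInputsOutputs helper above and the FunctionDef/OpDef protos from tensorflow/core/framework; the requested counts come back out of the signature's repeated fields:

    #include <cassert>

    #include "tensorflow/core/framework/function.pb.h"  // FunctionDef
    #include "tensorflow/core/framework/op_def.pb.h"    // OpDef, ArgDef

    // Hypothetical check: each add_input_arg()/add_output_arg() call appends an
    // empty ArgDef, so the repeated-field sizes mirror the requested counts.
    void CheckFuncDefArgCounts() {
      tensorflow::FunctionDef func =
          FuncDefWithNumInputsOutputs(/*num_inputs=*/2, /*num_outputs=*/1);
      assert(func.signature().input_arg_size() == 2);
      assert(func.signature().output_arg_size() == 1);
    }
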
  2. tensorflow/compiler/jit/xla_launch_util.cc

      return variable_lookup;
    }
    
    }  // anonymous namespace
    
    std::vector<const Tensor*> InputsFromContext(OpKernelContext* ctx) {
      std::vector<const Tensor*> inputs;
      inputs.reserve(ctx->num_inputs());
      for (int input_idx = 0; input_idx < ctx->num_inputs(); input_idx++) {
        inputs.push_back(&ctx->input(input_idx));
      }
      return inputs;
    }
    
    absl::StatusOr<std::vector<int>> GetConstantInputIndicesFromContext(
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 40.4K bytes
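
    A small sketch of the same traversal pattern (a hypothetical helper, not part of xla_launch_util.cc): OpKernelContext::num_inputs() bounds the loop and input(i) returns the i-th dense input tensor.

    #include <cstdint>

    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/framework/tensor.h"

    // Sum the element counts of every input tensor attached to the context.
    int64_t TotalInputElements(tensorflow::OpKernelContext* ctx) {
      int64_t total = 0;
      for (int i = 0; i < ctx->num_inputs(); ++i) {
        total += ctx->input(i).NumElements();
      }
      return total;
    }
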
  3. tensorflow/c/eager/c_api_unified_experimental.cc

                   absl::StatusMessageAsCStr(status));
    }
    
    void TF_ExecuteOperation(TF_AbstractOp* op, int num_inputs,
                             TF_AbstractTensor* const* inputs, TF_OutputList* o,
                             TF_Status* s) {
      for (int i = 0; i < num_inputs; i++) {
        tsl::Set_TF_Status_from_Status(s, unwrap(op)->AddInput(unwrap(inputs[i])));
        if (TF_GetCode(s) != TF_OK) {
          return;
    - Last Modified: Thu May 09 10:15:17 UTC 2024
    - 9K bytes
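
    A hedged caller-side sketch, assuming an existing TF_ExecutionContext* and two TF_AbstractTensor* operands, plus the companion helpers declared in c_api_unified_experimental.h (TF_NewAbstractOp, TF_AbstractOpSetOpType, TF_DeleteAbstractOp):

    #include "tensorflow/c/eager/c_api_unified_experimental.h"
    #include "tensorflow/c/tf_status.h"

    // Build an "Add" op and run it through TF_ExecuteOperation; num_inputs tells
    // the call how many entries to read from the inputs array.
    void AddViaUnifiedApi(TF_ExecutionContext* ctx, TF_AbstractTensor* a,
                          TF_AbstractTensor* b, TF_OutputList* outputs,
                          TF_Status* s) {
      TF_AbstractOp* op = TF_NewAbstractOp(ctx);
      TF_AbstractOpSetOpType(op, "Add", s);
      if (TF_GetCode(s) == TF_OK) {
        TF_AbstractTensor* inputs[2] = {a, b};
        TF_ExecuteOperation(op, /*num_inputs=*/2, inputs, outputs, s);
      }
      TF_DeleteAbstractOp(op);
    }
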
  4. tensorflow/c/ops_test.cc

    }
    
    #define C_CTX(x) reinterpret_cast<TF_ShapeInferenceContext*>(x)
    #define C_SHP(x) reinterpret_cast<TF_ShapeHandle*>(x)
    
    static OpDef MakeOpDef(int num_inputs, int num_outputs) {
      OpRegistrationData op_reg_data;
      OpDefBuilder b("dummy");
      for (int i = 0; i < num_inputs; ++i) {
        b.Input(strings::StrCat("i", i, ": float"));
      }
      for (int i = 0; i < num_outputs; ++i) {
        b.Output(strings::StrCat("o", i, ": float"));
    - Last Modified: Wed Aug 11 01:20:50 UTC 2021
    - 12.6K bytes
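
    A sketch of how a MakeOpDef-style helper typically finishes (the names here are illustrative, not the test file's code): OpDefBuilder::Finalize() validates the spec strings and writes the resulting OpDef into an OpRegistrationData.

    #include "tensorflow/core/framework/op_def.pb.h"
    #include "tensorflow/core/framework/op_def_builder.h"
    #include "tensorflow/core/platform/strcat.h"

    // "Dummy" is a placeholder op name; every input/output is declared as float.
    tensorflow::OpDef MakeFloatOpDef(int num_inputs, int num_outputs) {
      tensorflow::OpRegistrationData op_reg_data;
      tensorflow::OpDefBuilder b("Dummy");
      for (int i = 0; i < num_inputs; ++i) {
        b.Input(tensorflow::strings::StrCat("i", i, ": float"));
      }
      for (int i = 0; i < num_outputs; ++i) {
        b.Output(tensorflow::strings::StrCat("o", i, ": float"));
      }
      tensorflow::Status status = b.Finalize(&op_reg_data);
      return status.ok() ? op_reg_data.op_def : tensorflow::OpDef();
    }
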
  5. tensorflow/cc/gradients/nn_grad.cc

            "FusedBatchNorm requires at least 5 outputs");
      }
      if (grad_inputs.empty()) {
        return errors::InvalidArgument("FusedBatchNorm grad requires 1 grad input");
      }
      if (op.num_inputs() < 3) {
        return errors::InvalidArgument("FusedBatchNorm has too few inputs");
      }
    
      Output x = op.input(0);
      Output grad_y = grad_inputs[0];
      Output scale = op.input(1);
      float epsilon;
    - Last Modified: Fri May 27 23:34:33 UTC 2022
    - 24.5K bytes
  6. tensorflow/cc/framework/ops.h

    /// @addtogroup core
    /// @{
    
    /// Represents a node in the computation graph.
    class Operation {
     public:
      Operation() : node_(nullptr) {}
      explicit Operation(Node* n);
    
      int32 num_inputs() const { return node_->num_inputs(); }
      DataType input_type(int32_t o) const { return node_->input_type(o); }
      Output input(int32_t i) const;
    
      int32 num_outputs() const { return node_->num_outputs(); }
    - Last Modified: Sat Apr 13 05:57:22 UTC 2024
    - 10.5K bytes
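
    A brief sketch of this wrapper in use, assuming the C++ client API (Scope and the generated ops under tensorflow/cc/ops): build a tiny graph and walk the Add node's inputs via num_inputs(), input_type(), and input().

    #include <cstdint>

    #include "tensorflow/cc/framework/ops.h"
    #include "tensorflow/cc/framework/scope.h"
    #include "tensorflow/cc/ops/standard_ops.h"
    #include "tensorflow/core/framework/types.h"
    #include "tensorflow/core/platform/logging.h"

    // Log the dtype and producing node of each input feeding the Add node.
    void LogAddInputs() {
      tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
      auto a = tensorflow::ops::Const(scope, 1.0f);
      auto b = tensorflow::ops::Const(scope, 2.0f);
      tensorflow::Output sum = tensorflow::ops::Add(scope, a, b);
      tensorflow::Operation op = sum.op();
      for (int32_t i = 0; i < op.num_inputs(); ++i) {
        LOG(INFO) << "input " << i << ": "
                  << tensorflow::DataTypeString(op.input_type(i)) << " from "
                  << op.input(i).node()->name();
      }
    }
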
  7. tensorflow/cc/gradients/math_grad.cc

      // hence dx_k = dy for all x_k
      // So the gradient for AddN just transfers the incoming gradient to
      // all outgoing gradients.
      auto incoming = Identity(scope, grad_inputs[0]);
      for (int32_t i = 0; i < op.num_inputs(); ++i) {
        grad_outputs->push_back(incoming);
      }
      return scope.status();
    }
    REGISTER_GRADIENT_OP("AddN", AddNGrad);
    
    Status PowGrad(const Scope& scope, const Operation& op,
    - Last Modified: Fri Aug 25 18:20:20 UTC 2023
    - 50.7K bytes
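
    The same fan-out pattern, sketched for a hypothetical n-ary op "MySum" (the op name and gradient function are placeholders that mirror the structure of math_grad.cc):

    #include <vector>

    #include "tensorflow/cc/framework/grad_op_registry.h"
    #include "tensorflow/cc/framework/ops.h"
    #include "tensorflow/cc/ops/standard_ops.h"

    namespace tensorflow {
    namespace ops {
    namespace {

    // The incoming gradient dy is valid for every input, so one shared Identity
    // is pushed onto grad_outputs once per op.num_inputs().
    Status MySumGrad(const Scope& scope, const Operation& op,
                     const std::vector<Output>& grad_inputs,
                     std::vector<Output>* grad_outputs) {
      auto incoming = Identity(scope, grad_inputs[0]);
      for (int32_t i = 0; i < op.num_inputs(); ++i) {
        grad_outputs->push_back(incoming);
      }
      return scope.status();
    }
    REGISTER_GRADIENT_OP("MySum", MySumGrad);  // "MySum" is a placeholder name.

    }  // namespace
    }  // namespace ops
    }  // namespace tensorflow
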
  8. tensorflow/c/eager/parallel_device/parallel_device.cc

          reinterpret_cast<NamedParallelDevice*>(device_info);
      std::vector<MaybeParallelTensorUnowned> typed_inputs;
      int num_inputs = TFE_OpGetFlatInputCount(original_op, status);
      if (TF_GetCode(status) != TF_OK) return;
      typed_inputs.reserve(num_inputs);
      for (int i = 0; i < num_inputs; ++i) {
        TFE_TensorHandle* input = TFE_OpGetFlatInput(original_op, i, status);
        if (TF_GetCode(status) != TF_OK) return;
    - Last Modified: Wed Mar 29 22:05:31 UTC 2023
    - 18.3K bytes
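
    A hedged sketch of the same traversal for a custom-device execute callback, using TFE_OpGetFlatInputCount/TFE_OpGetFlatInput from the eager C API (the helper name is illustrative):

    #include <vector>

    #include "tensorflow/c/eager/c_api.h"
    #include "tensorflow/c/tf_status.h"

    // Collect the flattened input handles of an op, bailing out on the first
    // error reported through status.
    std::vector<TFE_TensorHandle*> FlatInputs(const TFE_Op* op,
                                              TF_Status* status) {
      std::vector<TFE_TensorHandle*> handles;
      int num_inputs = TFE_OpGetFlatInputCount(op, status);
      if (TF_GetCode(status) != TF_OK) return handles;
      handles.reserve(num_inputs);
      for (int i = 0; i < num_inputs; ++i) {
        TFE_TensorHandle* input = TFE_OpGetFlatInput(op, i, status);
        if (TF_GetCode(status) != TF_OK) return handles;
        handles.push_back(input);
      }
      return handles;
    }
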
  9. tensorflow/compiler/jit/xla_kernel_creator_test.cc

      ASSERT_TRUE(status.ok()) << status.ToString();
    
      EXPECT_EQ("XTimesY", kernel_->name());
      EXPECT_EQ("XTimesY", kernel_->type_string());
    
      EXPECT_EQ(2, kernel_->num_inputs());
      EXPECT_EQ(DT_FLOAT, kernel_->input_type(0));
      EXPECT_EQ(DT_RESOURCE, kernel_->input_type(1));
      EXPECT_EQ(DEVICE_MEMORY, kernel_->input_memory_types()[0]);
      EXPECT_EQ(HOST_MEMORY, kernel_->input_memory_types()[1]);
    - Last Modified: Tue May 16 01:39:55 UTC 2023
    - 5.7K bytes
  10. tensorflow/c/kernels.cc

      }
    #endif  // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD)
    }
    
    int TF_NumInputs(TF_OpKernelContext* ctx) {
      auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
      return cc_ctx->num_inputs();
    }
    
    int TF_NumOutputs(TF_OpKernelContext* ctx) {
      auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
      return cc_ctx->num_outputs();
    }
    
    - Last Modified: Tue May 28 22:53:47 UTC 2024
    - 36K bytes
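
    A minimal sketch of a kernel compute callback written against the C kernel API (tensorflow/c/kernels.h), pairing TF_NumInputs with TF_GetInput; the kernel itself is hypothetical:

    #include "tensorflow/c/kernels.h"
    #include "tensorflow/c/tf_status.h"
    #include "tensorflow/c/tf_tensor.h"

    // Walk every input slot of the context, stopping at the first error.
    static void MyComputeFunc(void* kernel, TF_OpKernelContext* ctx) {
      TF_Status* status = TF_NewStatus();
      const int num_inputs = TF_NumInputs(ctx);
      for (int i = 0; i < num_inputs; ++i) {
        TF_Tensor* input = nullptr;
        TF_GetInput(ctx, i, &input, status);
        if (TF_GetCode(status) != TF_OK) break;
        // ... inspect or forward the tensor here ...
        TF_DeleteTensor(input);
      }
      TF_DeleteStatus(status);
    }
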