Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 10 of 14 for grad_inputs_ (0.15 sec)

  1. tensorflow/c/experimental/gradients/array_grad.cc

                     absl::Span<AbstractTensorHandle*> grad_inputs) override {
        for (int i = 0; i < grad_outputs.size(); i++) {
          auto grad_input = grad_outputs[i];
          // TODO(srbs): Should we add a copy contructor to AbstractTensorHandle
          // that takes care of this similar to `Tensor`?
          if (grad_input) {
            grad_input->Ref();
          }
          grad_inputs[i] = grad_input;
        }
        return absl::OkStatus();
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 28 13:53:47 UTC 2024
    - 1.6K bytes
    - Viewed (0)
  2. tensorflow/cc/gradients/image_grad.cc

                                       std::vector<Output>* grad_outputs) {
      string kernel_type;
      TF_RETURN_IF_ERROR(
          GetNodeAttr(op.node()->attrs(), "kernel_type", &kernel_type));
      bool antialias;
      TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "antialias", &antialias));
      grad_outputs->push_back(internal::ScaleAndTranslateGrad(
          scope, grad_inputs[0], op.input(0), op.input(2), op.input(3),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Nov 11 00:29:23 UTC 2021
    - 5.7K bytes
    - Viewed (0)
  3. tensorflow/cc/framework/grad_op_registry.h

    /// Implementations should add operations to compute the gradient outputs of
    /// 'op' (returned in 'grad_outputs') using 'scope' and 'grad_inputs'.
    typedef Status (*GradFunc)(const Scope& scope, const Operation& op,
                               const std::vector<Output>& grad_inputs,
                               std::vector<Output>* grad_outputs);
    
    /// GradOpRegistry maintains a static registry of gradient functions.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Oct 05 15:33:58 UTC 2022
    - 2.9K bytes
    - Viewed (0)
  4. tensorflow/c/experimental/gradients/nn_grad.cc

                     absl::Span<AbstractTensorHandle*> grad_inputs) override {
        // Grad for Softmax Input
        TF_RETURN_IF_ERROR(BroadcastMul(
            ctx, grad_outputs[0], forward_outputs_[1],
            grad_inputs.subspan(0, 1)));  // upstream_grad * local softmax grad
    
        // Grad for labels is null
        grad_inputs[1] = nullptr;
        return absl::OkStatus();
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 09 06:38:45 UTC 2024
    - 5.7K bytes
    - Viewed (0)
  5. tensorflow/c/experimental/gradients/not_differentiable.cc

    namespace gradients {
    Status NotDifferentiableGradientFunction::Compute(
        AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> grad_outputs,
        absl::Span<AbstractTensorHandle*> grad_inputs) {
      for (int i = 0; i < grad_inputs.size(); i++) {
        grad_inputs[i] = nullptr;
      }
      return absl::OkStatus();
    }
    
    Status RegisterNotDifferentiable(GradientRegistry* registry, const string& op) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 28 13:53:47 UTC 2024
    - 1.3K bytes
    - Viewed (0)
  6. tensorflow/cc/gradients/functional_grad.cc

      const int num_inputs = op.num_inputs();
      func_inputs.reserve(num_inputs + grad_inputs.size());
      input_dtypes.reserve(num_inputs);
    
      for (int i = 0; i < num_inputs; i++) {
        func_inputs.push_back(op.input(i));
        input_dtypes.push_back(op.input_type(i));
      }
    
      func_inputs.insert(std::end(func_inputs), std::begin(grad_inputs),
                         std::end(grad_inputs));
    
      auto grad = SymbolicGradient(scope, func_inputs, input_dtypes, f);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Oct 15 20:09:06 UTC 2021
    - 2.1K bytes
    - Viewed (0)
  7. tensorflow/cc/framework/while_gradients.h

    namespace tensorflow {
    
    // Adds the gradient computation for the while loop associated with
    // `while_ctx`. `grad_inputs` are the partial derivatives w.r.t. the loop
    // outputs, i.e. the exit nodes.  The partial derivatives w.r.t. the loop
    // inputs, i.e. the input loop vars, are returned in `grad_outputs`.
    // `grad_inputs` and `grad_outputs` are both in loop-variable order, as defined
    // by the original inputs to BuildWhileLoop().
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Oct 05 15:48:53 UTC 2022
    - 1.7K bytes
    - Viewed (0)
  8. tensorflow/c/experimental/gradients/custom_gradient_test.cc

                     absl::Span<AbstractTensorHandle* const> grad_outputs,
                     absl::Span<AbstractTensorHandle*> grad_inputs) override {
        CHECK_EQ(grad_outputs.size(), 1);
        CHECK_EQ(grad_inputs.size(), 1);
        grad_inputs[0] = grad_outputs[0];
        if (grad_inputs[0]) {
          grad_inputs[0]->Ref();
        }
        return absl::OkStatus();
      }
    };
    
    // Computes:
    //
    // @tf.custom_gradient
    // def f(input):
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 28 13:53:47 UTC 2024
    - 4.8K bytes
    - Viewed (0)
  9. tensorflow/cc/framework/gradients.h

                                const std::vector<Output>& inputs,
                                const std::vector<Output>& grad_inputs,
                                std::vector<Output>* grad_outputs);
    
    // Same as above, but uses 'OnesLike' for all shapes in
    // 'outputs' as grad_inputs.
    Status AddSymbolicGradients(const Scope& scope,
                                const std::vector<Output>& outputs,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Oct 05 15:45:03 UTC 2022
    - 2.3K bytes
    - Viewed (0)
  10. tensorflow/cc/framework/while_gradients.cc

    Status AddWhileGradientLoop(WhileContext* while_ctx,
                                const std::vector<Output>& grad_inputs,
                                const Output& backprop_execution_pred,
                                const Scope& parent_scope,
                                std::vector<Output>* grad_outputs) {
      DCHECK_EQ(grad_inputs.size(), while_ctx->body_outputs().size());
      DCHECK_EQ(while_ctx->body_inputs().size(), while_ctx->body_outputs().size());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 13 05:57:22 UTC 2024
    - 8.1K bytes
    - Viewed (0)
Back to top