Results 11 - 14 of 14 for grad_inputs_ (0.33 sec)

  1. tensorflow/cc/gradients/grad_testutil.cc

    Status CallGradFunction(const Scope& scope, const Operation& op,
                            const std::vector<Output>& grad_inputs,
                            std::vector<Output>* grad_outputs) {
      ops::GradFunc grad_fn;
      TF_RETURN_IF_ERROR(ops::GradOpRegistry::Global()->Lookup(
          op.node()->type_string(), &grad_fn));
      TF_RETURN_IF_ERROR(grad_fn(scope, op, grad_inputs, grad_outputs));
      TF_RETURN_IF_ERROR(scope.status());
      return absl::OkStatus();
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Feb 27 14:07:19 UTC 2024
    - 1.3K bytes
    - Viewed (0)
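    This helper looks up the gradient function registered for `op` and invokes it, which is how the C++ gradient unit tests exercise registered gradients. A minimal usage sketch follows, assuming the helper is declared in namespace `test` as in `grad_testutil.h`; the `Square` op, `OnesLike` seed, and `TF_ASSERT_OK` harness are illustrative test setup, not taken from this file:

    ```
    // Hypothetical test snippet: exercise Square's registered gradient
    // through the helper above (assumed setup, not from the file).
    Scope scope = Scope::NewRootScope();
    auto x = ops::Const(scope, {3.0f, 4.0f});
    auto y = ops::Square(scope, x);

    // Seed the backward pass with ones shaped like y.
    std::vector<Output> grad_inputs = {ops::OnesLike(scope, y)};
    std::vector<Output> grad_outputs;
    TF_ASSERT_OK(test::CallGradFunction(scope, Operation(y.node()),
                                        grad_inputs, &grad_outputs));
    // grad_outputs[0] now computes dy/dx = 2 * x.
    ```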
  2. tensorflow/cc/gradients/data_flow_grad.cc

    REGISTER_NO_GRADIENT_OP("GetSessionTensor");
    REGISTER_NO_GRADIENT_OP("DeleteSessionTensor");
    
    Status DynamicPartitionGrad(const Scope& scope, const Operation& op,
                                const std::vector<Output>& grad_inputs,
                                std::vector<Output>* grad_outputs) {
      // DynamicPartition only moves input values into various positions
      // in the output, so the gradient operation only has to map incoming
      // gradients into the positions their input values originally held.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Jul 24 13:40:35 UTC 2021
    - 5.8K bytes
    - Viewed (0)
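    The comment states the whole trick: the forward op only permutes values, so the gradient only has to permute the incoming gradients back. A hedged sketch of that idea, simplified from what the file actually does: partition a range of flat indices exactly as the data was partitioned, then DynamicStitch the gradient pieces back into data order.

    ```
    // Sketch of the index-permutation idea; the real DynamicPartitionGrad
    // in this file is close to this but handles more edge cases.
    Status DynamicPartitionGradSketch(const Scope& scope, const Operation& op,
                                      const std::vector<Output>& grad_inputs,
                                      std::vector<Output>* grad_outputs) {
      auto partitions = op.input(1);
      int32 num_partitions;
      TF_RETURN_IF_ERROR(
          GetNodeAttr(op.node()->attrs(), "num_partitions", &num_partitions));

      // Enumerate every input position, shaped like `partitions`.
      auto partitions_shape = Shape(scope, partitions);
      auto num_values = Prod(scope, partitions_shape, /*axis=*/0);
      auto original_indices =
          Reshape(scope, Range(scope, 0, num_values, 1), partitions_shape);

      // Partition the indices as the forward op partitioned the data, then
      // stitch the incoming gradients back to their source positions.
      auto partitioned_indices =
          DynamicPartition(scope, original_indices, partitions, num_partitions);
      auto reconstructed =
          DynamicStitch(scope, partitioned_indices.outputs, grad_inputs);

      grad_outputs->push_back(reconstructed);
      grad_outputs->push_back(NoGradient());  // partitions: not differentiable.
      return scope.status();
    }
    ```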
  3. tensorflow/c/eager/gradients.h

    // class AddGradientFunction : public GradientFunction {
    //  public:
    //   Status Compute(Context* ctx,
    //                  absl::Span<AbstractTensorHandle* const> grad_inputs,
    //                  absl::Span<AbstractTensorHandle*> grad_outputs) override {
    //     grad_outputs[0] = grad_inputs[0];
    //     grad_outputs[1] = grad_inputs[0];
    //     grad_outputs[0]->Ref();
    //     grad_outputs[1]->Ref();
    //     return OkStatus();
    //   }
    //   ~AddGradientFunction() override {}
    // };
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Sep 26 10:27:05 UTC 2022
    - 6.9K bytes
    - Viewed (0)
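    The pair of Ref() calls is the subtle part: the same incoming handle is handed out as both outputs, so each alias takes its own reference and the caller can release each output independently. To be usable, a GradientFunction also needs a factory registered for its op; a hedged sketch of the factory-plus-registry pattern this header describes (the helper names and op name are illustrative, and exact signatures may differ across versions):

    ```
    // Sketch only: wire AddGradientFunction into the gradient registry.
    // RegisterGradients and the "AddV2" op name are illustrative.
    GradientFunction* AddRegisterer(const ForwardOperation& op) {
      return new AddGradientFunction;
    }

    Status RegisterGradients(GradientRegistry* registry) {
      return registry->Register("AddV2", AddRegisterer);
    }
    ```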
  4. tensorflow/cc/gradients/README.md

    2.  Write the op gradient with the following naming scheme:
    
        ```
        Status OpNameGrad(const Scope& scope, const Operation& op,
                          const std::vector<Output>& grad_inputs,
                          std::vector<Output>* grad_outputs) {
          ...
          return scope.status();
        }
        REGISTER_GRADIENT_OP("OpName", OpNameGrad);
        ```
    
    3.  Op gradients are implemented by using the C++ API.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Nov 29 19:12:55 UTC 2021
    - 2K bytes
    - Viewed (0)
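    As a concrete instance of the naming scheme from step 2, here is a short sketch mirroring the existing NegGrad in math_grad.cc: since d(-x)/dx = -1, the gradient simply negates the incoming gradient.

    ```
    #include "tensorflow/cc/framework/grad_op_registry.h"
    #include "tensorflow/cc/ops/standard_ops.h"

    namespace tensorflow {
    namespace ops {
    namespace {

    // Gradient for "Neg": negate the incoming gradient.
    Status NegGrad(const Scope& scope, const Operation& op,
                   const std::vector<Output>& grad_inputs,
                   std::vector<Output>* grad_outputs) {
      grad_outputs->push_back(Neg(scope, grad_inputs[0]));
      return scope.status();
    }
    REGISTER_GRADIENT_OP("Neg", NegGrad);

    }  // namespace
    }  // namespace ops
    }  // namespace tensorflow
    ```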