Results 1 - 3 of 3 for Hare (0.2 sec)

  1. tensorflow/c/experimental/gradients/nn_grad.cc

                     absl::Span<AbstractTensorHandle* const> grad_outputs,
                     absl::Span<AbstractTensorHandle*> grad_inputs) override {
        /* Given upstream grad U and a BiasAdd: A + bias, the gradients are:
         *
         *    dA = U
         *    dbias = reduceSum(U, dims = channel_dim)
         */
    
        AbstractTensorHandle* upstream_grad = grad_outputs[0];
        DCHECK(upstream_grad);
    
    C++
    - Registered: Tue Mar 26 12:39:09 GMT 2024
    - Last Modified: Wed Feb 28 13:53:47 GMT 2024
    - 5.7K bytes
    - Viewed (0)
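The comment in this snippet states the BiasAdd gradient rule: the gradient with respect to the input A is the upstream gradient U passed through unchanged, and the gradient with respect to bias is U summed over every dimension except the channel dimension. Below is a minimal plain-C++ sketch of that reduction for a [rows, channels] layout; it uses raw vectors rather than the TensorFlow AbstractTensorHandle API, and the names and shapes are illustrative assumptions, not the library's implementation.

    // Sketch (not the TensorFlow API): BiasAdd gradients for a [rows, channels]
    // upstream gradient U, channels last.
    //   dA    = U                     (pass-through)
    //   dbias = reduce_sum(U, rows)   (sum over every non-channel dimension)
    #include <cstddef>
    #include <iostream>
    #include <vector>

    std::vector<double> BiasAddGradBias(const std::vector<double>& upstream,
                                        std::size_t rows, std::size_t channels) {
      std::vector<double> dbias(channels, 0.0);
      for (std::size_t r = 0; r < rows; ++r) {
        for (std::size_t c = 0; c < channels; ++c) {
          dbias[c] += upstream[r * channels + c];  // accumulate over the row axis
        }
      }
      return dbias;
    }

    int main() {
      // U is 2x3; dbias is the per-channel column sum {5, 7, 9}; dA is simply U.
      std::vector<double> upstream = {1, 2, 3, 4, 5, 6};
      for (double v : BiasAddGradBias(upstream, 2, 3)) std::cout << v << ' ';
      std::cout << '\n';
    }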
  2. tensorflow/c/experimental/gradients/array_grad.cc

        for (int i = 0; i < grad_outputs.size(); i++) {
          auto grad_input = grad_outputs[i];
      // TODO(srbs): Should we add a copy constructor to AbstractTensorHandle
          // that takes care of this similar to `Tensor`?
          if (grad_input) {
            grad_input->Ref();
          }
          grad_inputs[i] = grad_input;
        }
        return absl::OkStatus();
      }
      ~IdentityNGradientFunction() override {}
    };
    C++
    - Registered: Tue Apr 09 12:39:09 GMT 2024
    - Last Modified: Wed Feb 28 13:53:47 GMT 2024
    - 1.6K bytes
    - Viewed (0)
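This snippet shows the IdentityN gradient simply forwarding each upstream gradient as the corresponding input gradient, taking an extra reference on every non-null handle so the caller owns grad_inputs independently. The sketch below illustrates that ownership pattern with a hypothetical ref-counted Handle type standing in for AbstractTensorHandle; it is an assumption-laden illustration, not the TensorFlow implementation.

    // Sketch of the forwarding pattern above; Handle and its Ref()/Unref()
    // are hypothetical stand-ins for AbstractTensorHandle's reference counting.
    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Handle {
      int refcount = 1;
      void Ref() { ++refcount; }
      void Unref() { if (--refcount == 0) delete this; }
    };

    // Forward each upstream gradient as the matching input gradient, taking an
    // extra reference so the caller owns grad_inputs independently.
    void ForwardGradients(const std::vector<Handle*>& grad_outputs,
                          std::vector<Handle*>& grad_inputs) {
      grad_inputs.resize(grad_outputs.size());
      for (std::size_t i = 0; i < grad_outputs.size(); ++i) {
        Handle* g = grad_outputs[i];
        if (g) g->Ref();  // skip null gradients, as in the snippet above
        grad_inputs[i] = g;
      }
    }

    int main() {
      Handle* h = new Handle;  // refcount == 1
      std::vector<Handle*> outs = {h, nullptr}, ins;
      ForwardGradients(outs, ins);
      assert(ins[0] == h && h->refcount == 2 && ins[1] == nullptr);
      h->Unref();  // caller's reference
      h->Unref();  // original reference; the handle is freed here
    }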
  3. tensorflow/c/eager/unified_api_testutil.cc

        const char* fn_name = "test_fn";
        core::RefCountPtr<AbstractFunction> scoped_func;
        // Returning null tensors from a tf.function is not supported, so we keep
        // track of the indices in the model's outputs that are nullptr in this set.
        // The FunctionDef only outputs the non-null tensors. We later pad the
        // function op outputs to have nullptrs at the `null_indices`.
        absl::flat_hash_set<int> null_indices;
        {
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Feb 27 13:57:45 GMT 2024
    - 5.7K bytes
    - Viewed (0)
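The comment in this snippet describes recording which of the model's outputs are null so that, after the traced function runs (a tf.function can only return non-null tensors), the results can be padded back with nullptrs at the recorded positions. The sketch below shows only that padding step, with plain int* standing in for tensor handles and std::unordered_set in place of absl::flat_hash_set; the function name and signature are illustrative, not the TensorFlow test utility itself.

    // Sketch of padding function outputs back to the model's output positions:
    // the traced function returned only the non-null tensors, so nullptrs are
    // reinserted at the recorded null_indices.
    #include <cassert>
    #include <unordered_set>
    #include <vector>

    std::vector<int*> PadWithNulls(const std::vector<int*>& fn_outputs,
                                   const std::unordered_set<int>& null_indices,
                                   int num_model_outputs) {
      std::vector<int*> padded(num_model_outputs, nullptr);
      int next = 0;
      for (int i = 0; i < num_model_outputs; ++i) {
        if (null_indices.count(i)) continue;  // leave nullptr at this slot
        padded[i] = fn_outputs[next++];       // next non-null function output
      }
      return padded;
    }

    int main() {
      int a = 1, b = 2;
      // The model had 4 outputs; outputs 1 and 3 were null, so the traced
      // function returned only two tensors.
      std::vector<int*> fn_outputs = {&a, &b};
      auto padded = PadWithNulls(fn_outputs, /*null_indices=*/{1, 3}, 4);
      assert(padded[0] == &a && padded[1] == nullptr &&
             padded[2] == &b && padded[3] == nullptr);
    }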