Results 1 - 4 of 4 for so (0.14 sec)

  1. tensorflow/c/experimental/gradients/array_grad_test.cc

      TF_RETURN_IF_ERROR(
          ops::IdentityN(ctx, inputs, absl::MakeSpan(temp_outputs), "IdentityN"));
      // Although `ops::IdentityN` returns 2 tensors, the first tensor isn't
      // needed for computing the gradient, so we can safely drop it.
      outputs[0] = temp_outputs[1];
      temp_outputs[0]->Unref();
      return absl::OkStatus();
    }
    
    class CppGradients
        : public ::testing::TestWithParam<std::tuple<const char*, bool, bool>> {
    C++
    - Registered: Tue Mar 26 12:39:09 GMT 2024
    - Last Modified: Wed Feb 28 13:53:47 GMT 2024
    - 5K bytes
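    A minimal sketch of the full model function this snippet appears to come from, assuming the name `IdentityNModel` and an `outputs` span parameter matching the `BiasAddModel` signature shown in result 4; illustrative, not the verbatim TensorFlow source.

      Status IdentityNModel(AbstractContext* ctx,
                            absl::Span<AbstractTensorHandle* const> inputs,
                            absl::Span<AbstractTensorHandle*> outputs) {
        // IdentityN mirrors its two inputs into two outputs.
        std::vector<AbstractTensorHandle*> temp_outputs(2);
        TF_RETURN_IF_ERROR(
            ops::IdentityN(ctx, inputs, absl::MakeSpan(temp_outputs), "IdentityN"));
        // Keep only the second output; the first isn't needed for the gradient.
        outputs[0] = temp_outputs[1];
        temp_outputs[0]->Unref();
        return absl::OkStatus();
      }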
  2. tensorflow/c/eager/gradient_checker.cc

      if (num_dims_out == 0) {
        outputs[0] = model_out.release();
        return absl::OkStatus();
      }
    
      // Otherwise, reduce-sum the output to get a scalar.

      // Sum over all dimensions, so build a Tensor containing [0,...,num_dims_out-1].
      AbstractTensorHandlePtr sum_dims;
      {
        vector<int32_t> vals(num_dims_out);
        int64_t vals_shape[] = {num_dims_out};
        Range(&vals, 0, num_dims_out);
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 7.3K bytes
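    A minimal sketch of what the `Range` helper is doing here, assuming it simply fills the vector with consecutive integers so that `vals` becomes [0, ..., num_dims_out-1]; illustrative, not the TensorFlow source.

      void Range(std::vector<int32_t>* vals, int32_t start, int32_t limit) {
        // Fill *vals with start, start + 1, ..., limit - 1.
        // Assumes vals was already sized to hold (limit - start) elements.
        for (int32_t i = start; i < limit; ++i) {
          (*vals)[i - start] = i;
        }
      }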
  3. tensorflow/c/eager/unified_api_testutil.cc

      if (use_function) {
        const char* fn_name = "test_fn";
        core::RefCountPtr<AbstractFunction> scoped_func;
        // Returning null tensors from a tf.function is not supported, so we keep
        // track of which indices in the model's outputs are nullptr in this set.
        // The FunctionDef only outputs the non-null tensors. We later pad the
        // function op outputs to have nullptrs at the `null_indices`.
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Feb 27 13:57:45 GMT 2024
    - 5.7K bytes
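    A minimal sketch of the bookkeeping described in that comment, assuming a hypothetical `model_outputs` vector from tracing the model and a hypothetical `fn_outputs` vector from executing the traced function op; the names are illustrative, not the TensorFlow source.

      // Record which model outputs are null; only the non-null ones become
      // FunctionDef outputs. `model_outputs` is a hypothetical name here.
      std::unordered_set<int> null_indices;
      for (int i = 0; i < static_cast<int>(model_outputs.size()); ++i) {
        if (model_outputs[i] == nullptr) null_indices.insert(i);
      }
      // After running the function op, pad its outputs (`fn_outputs`, also
      // hypothetical) back to the original arity with nullptr at the recorded
      // indices.
      std::vector<AbstractTensorHandle*> padded(model_outputs.size());
      int next = 0;
      for (int i = 0; i < static_cast<int>(padded.size()); ++i) {
        padded[i] = null_indices.count(i) ? nullptr : fn_outputs[next++];
      }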
  4. tensorflow/c/experimental/gradients/nn_grad_test.cc

      // `gradient_checker` only works with models that return a single tensor.
      // Although `ops::SparseSoftmaxCrossEntropyWithLogits` returns 2 tensors,
      // the second tensor isn't needed for computing the gradient, so we can
      // safely drop it.
      outputs[0] = loss;
      backprop->Unref();
      return absl::OkStatus();
    }
    
    Status BiasAddModel(AbstractContext* ctx,
                        absl::Span<AbstractTensorHandle* const> inputs,
    C++
    - Registered: Tue Mar 26 12:39:09 GMT 2024
    - Last Modified: Wed Feb 28 13:53:47 GMT 2024
    - 8.3K bytes
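    A minimal sketch of the full model function implied by this snippet, assuming the name `SparseSoftmaxCrossEntropyWithLogitsModel`, the `BiasAddModel`-style signature, and that the op wrapper returns `loss` and `backprop` through output pointers (an assumption about its signature); illustrative, not the verbatim TensorFlow source.

      Status SparseSoftmaxCrossEntropyWithLogitsModel(
          AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> inputs,
          absl::Span<AbstractTensorHandle*> outputs) {
        AbstractTensorHandle* loss;
        AbstractTensorHandle* backprop;
        // Assumed wrapper signature: (ctx, features, labels, &loss, &backprop, name).
        TF_RETURN_IF_ERROR(ops::SparseSoftmaxCrossEntropyWithLogits(
            ctx, inputs[0], inputs[1], &loss, &backprop,
            "SparseSoftmaxCrossEntropyWithLogits"));
        // Keep the loss and drop the backprop output, since `gradient_checker`
        // only handles models that return a single tensor.
        outputs[0] = loss;
        backprop->Unref();
        return absl::OkStatus();
      }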