Results 1 - 10 of 23 for Rake (0.12 sec)

  1. tensorflow/c/eager/parallel_device/parallel_device_remote_test.cc

      TensorHandlePtr combined_value = CreatePerDeviceValues(
          context.get(), in_components, device_name, status.get());
      ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
    
      // Loop to make synchronization failures more deterministic
      for (int i = 0; i < 100; ++i) {
        TensorHandlePtr multiply_result(
            Multiply(context.get(), combined_value.get(), combined_value.get(),
                     status.get()));
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Apr 27 22:09:57 GMT 2023
    - 6.7K bytes
    - Viewed (0)
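
    The loop in this snippet reruns the cross-device multiply 100 times so that any synchronization bug surfaces far more reliably within a single test run. A minimal standalone sketch of the same stress-loop idea, with an atomic counter and plain std::thread standing in for the parallel-device machinery (nothing below comes from the TensorFlow test):

      #include <atomic>
      #include <cassert>
      #include <thread>
      #include <vector>

      int main() {
        // A single iteration might pass by luck; repeating the racy scenario
        // many times makes an ordering bug far more likely to show up in one run.
        for (int iteration = 0; iteration < 100; ++iteration) {
          std::atomic<int> counter{0};
          std::vector<std::thread> workers;
          for (int t = 0; t < 4; ++t) {
            workers.emplace_back([&counter] {
              for (int i = 0; i < 1000; ++i) counter.fetch_add(1);
            });
          }
          for (auto& w : workers) w.join();
          assert(counter.load() == 4 * 1000);
        }
        return 0;
      }
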
  2. tensorflow/c/c_api.cc

    // directly, instead of requiring us to serialize to a GraphDef and
    // call Session::Extend().
    bool ExtendSessionGraphHelper(TF_Session* session, TF_Status* status) {
      if (session->graph != nullptr) {
        // Take the graph lock before the session lock to avoid deadlock. This is
        // safe since session->graph does not change.
        session->graph->mu.lock();
        mutex_lock session_lock(session->mu);
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 102.3K bytes
    - Viewed (0)
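
    The comment in this snippet records a lock-ordering rule: the graph mutex is always taken before the session mutex, so no two threads can ever hold the pair in opposite orders. A minimal sketch of that ordering discipline with plain std::mutex (the Graph and Session structs below are illustrative stand-ins, not the TF_Session types):

      #include <mutex>

      struct Graph { std::mutex mu; };
      struct Session { std::mutex mu; Graph* graph = nullptr; };

      // Every path that needs both locks acquires them in the same order:
      // graph first, then session. A consistent order rules out the classic
      // A-then-B vs. B-then-A deadlock.
      void ExtendGraphLocked(Session* session) {
        std::lock_guard<std::mutex> graph_lock(session->graph->mu);
        std::lock_guard<std::mutex> session_lock(session->mu);
        // ... mutate graph and session state while holding both locks ...
      }

      int main() {
        Graph g;
        Session s;
        s.graph = &g;
        ExtendGraphLocked(&s);
        return 0;
      }
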
  3. tensorflow/c/c_api_experimental.cc

      // Update the vector with information from `input_tensors` if provided.
      if (input_tensors != nullptr) {
        // Note that we take the address of the elements in `all_input_tensors`
        // below. Allocate enough space so that no reallocation happens, which will
        // make the pointers invalid.
        all_input_tensors.reserve(num_inputs);
        for (int i = 0; i < num_inputs; ++i) {
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 29.4K bytes
    - Viewed (0)
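
    The reserve(num_inputs) call is what keeps the element addresses taken later valid: a push_back that forces reallocation would invalidate every previously stored pointer. A small self-contained sketch of the pattern, with a plain struct standing in for the tensor type:

      #include <vector>

      struct InputTensor { int id; };

      int main() {
        const int num_inputs = 8;
        std::vector<InputTensor> all_input_tensors;
        std::vector<InputTensor*> input_ptrs;

        // Reserve the final size up front so push_back never reallocates;
        // otherwise the addresses stored in input_ptrs could dangle.
        all_input_tensors.reserve(num_inputs);
        for (int i = 0; i < num_inputs; ++i) {
          all_input_tensors.push_back(InputTensor{i});
          input_ptrs.push_back(&all_input_tensors.back());
        }
        return 0;
      }
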
  4. tensorflow/c/eager/gradients.cc

        int64_t target_id = target_tensor_ids[i];
        if (sources_set.find(target_id) != sources_set.end()) {
          auto tensor = targets[i];
          sources_that_are_targets.insert(
              std::make_pair(target_id, TapeTensor(tensor)));
        }
      }
    
      TF_RETURN_IF_ERROR(GradientTape::ComputeGradient(
          vspace, target_tensor_ids, source_tensor_ids, sources_that_are_targets,
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 19.3K bytes
    - Viewed (0)
  5. tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util_test.cc

      CHECK_OK(buffer.status());
    
      return new PJRT_Buffer{std::move(*buffer), c_api_client->pjrt_c_client()};
    }
    
    TEST(TensorPjRtBufferUtilTest, GetPjRtCBufferFromTensorNoBuffer) {
      auto allocator = std::make_unique<AsyncValueAllocator>();
      tensorflow::Tensor tensor(allocator.get(), DT_FLOAT, {1});
    
      EXPECT_THAT(
          GetPjRtCBufferFromTensor(&tensor),
          StatusIs(error::INTERNAL, HasSubstr(absl::StrCat(
    C++
    - Registered: Tue Feb 27 12:39:08 GMT 2024
    - Last Modified: Mon Oct 30 19:20:20 GMT 2023
    - 7.2K bytes
    - Viewed (0)
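
    The EXPECT_THAT here checks both the status code (INTERNAL) and a message substring with a single StatusIs matcher. Where that matcher is not available, the same assertion can be split into two standard GoogleTest checks against absl::Status; a sketch under that assumption (FailingOp is a hypothetical stand-in for GetPjRtCBufferFromTensor):

      #include <string>

      #include <gmock/gmock.h>
      #include <gtest/gtest.h>
      #include "absl/status/status.h"

      // Hypothetical stand-in for the function under test: always fails.
      absl::Status FailingOp() {
        return absl::InternalError("tensor does not hold a PjRt buffer");
      }

      TEST(StatusAssertionSketch, CodeAndMessage) {
        absl::Status s = FailingOp();
        EXPECT_EQ(s.code(), absl::StatusCode::kInternal);
        EXPECT_THAT(std::string(s.message()), ::testing::HasSubstr("PjRt buffer"));
      }
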
  6. tensorflow/c/experimental/grappler/grappler_test.cc

      std::unique_ptr<SingleMachine> cluster(new SingleMachine(5 * 60, 3, 0));
      TF_ASSERT_OK(cluster->Provision());
    
      TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
                                              cluster->GetDeviceNames());
      GrapplerItem item;
      CHECK(fake_input.NextItem(&item));
    
      TF_Status* status = TF_NewStatus();
      TF_GraphProperties* graph_properties =
    C++
    - Registered: Tue Feb 27 12:39:08 GMT 2024
    - Last Modified: Thu Apr 13 22:30:58 GMT 2023
    - 11.6K bytes
    - Viewed (0)
  7. tensorflow/c/eager/c_api_distributed_test.cc

      TFE_TensorHandle* h0 = TestVariable(ctx, 1.0, task1_name);
      TFE_TensorHandle* h1 = TestVariable(ctx, 2.0, task2_name);
      TFE_TensorHandle* h2 = TestVariable(ctx, 3.0, task0_name);
    
      // Add a sync point to make sure that variables have been initialized
      // before the function execution starts.
      TFE_ContextAsyncWait(ctx, status);
      EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
    
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 23.5K bytes
    - Viewed (0)
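
    TFE_ContextAsyncWait acts as a barrier here: every pending asynchronous variable initialization must complete before the function that reads those variables is launched. The same wait-before-use shape, sketched with standard std::future in place of the eager runtime (all names below are illustrative):

      #include <future>
      #include <vector>

      // Stand-in for an asynchronous variable initialization.
      double InitVariable(double value) { return value; }

      int main() {
        // Launch the initializations asynchronously.
        const double values[] = {1.0, 2.0, 3.0};
        std::vector<std::future<double>> pending;
        for (double v : values) {
          pending.push_back(std::async(std::launch::async, InitVariable, v));
        }

        // Sync point: block until every initialization has finished before
        // the computation that reads the variables starts.
        double sum = 0.0;
        for (auto& f : pending) sum += f.get();

        // ... run the dependent computation here ...
        return sum == 6.0 ? 0 : 1;
      }
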
  8. tensorflow/c/eager/gradient_checker.cc

        theta_inputs[input_index] = thetaMinus.get();
        TF_RETURN_IF_ERROR(
            RunAndMaybeSum(ctx, forward, theta_inputs, f_outputs, use_function));
        AbstractTensorHandlePtr fMinus(f_outputs[0]);
    
        // Take Difference of both estimates: (f(theta + eps) - f(theta - eps)).
        TF_RETURN_IF_ERROR(
            ops::Sub(ctx, fPlus.get(), fMinus.get(), f_outputs, "sub_top"));
        AbstractTensorHandlePtr fDiff(f_outputs[0]);
    
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 7.3K bytes
    - Viewed (0)
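
    This is the middle of a finite-difference gradient check: f is evaluated at theta + eps and theta - eps, the results are subtracted, and the difference is then, presumably, scaled by 1 / (2 * eps) further down to estimate df/dtheta. A scalar sketch of that central-difference estimate (the test function and tolerance are illustrative):

      #include <cassert>
      #include <cmath>
      #include <functional>

      // Central difference: df/dx is approximately
      // (f(x + eps) - f(x - eps)) / (2 * eps).
      double NumericalGrad(const std::function<double(double)>& f, double x,
                           double eps = 1e-5) {
        return (f(x + eps) - f(x - eps)) / (2.0 * eps);
      }

      int main() {
        auto f = [](double x) { return x * x * x; };  // analytic gradient: 3x^2
        double approx = NumericalGrad(f, 2.0);
        assert(std::abs(approx - 12.0) < 1e-4);
        return 0;
      }
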
  9. tensorflow/c/eager/gradients_test.cc

      int num_retvals = 1;
      std::vector<AbstractTensorHandle*> outputs(1);
      GradientRegistry registry;
      s = RegisterGradients(&registry);
      ASSERT_EQ(errors::OK, s.code()) << s.message();
      auto tape = std::make_unique<Tape>(/*persistent=*/false);
      s = Execute(check_numerics_op.get(), ctx.get(), absl::MakeSpan(outputs),
                  &num_retvals, &forward_op, tape.get(), registry);
      ASSERT_EQ(errors::OK, s.code()) << s.message();
    
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 09:49:45 GMT 2024
    - 7K bytes
    - Viewed (0)
  10. tensorflow/c/eager/parallel_device/parallel_device_lib.cc

    ParallelDevice::ParallelDevice(const std::vector<std::string>& devices,
                                   bool is_async, int in_flight_nodes_limit)
        : underlying_devices_(devices),
          default_cancellation_manager_(absl::make_unique<CancellationManager>()) {
      device_threads_.reserve(devices.size());
      for (int device_index = 0; device_index < devices.size(); ++device_index) {
        device_threads_.emplace_back(new DeviceThread(
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Feb 09 07:47:20 GMT 2024
    - 25.4K bytes
    - Viewed (1)
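
    The constructor shown here launches one DeviceThread per underlying device, reserving the vector first so nothing moves while the workers are being created. A stripped-down sketch of the per-device worker pattern (this DeviceThread is a trivial placeholder, not the TensorFlow class):

      #include <cstddef>
      #include <memory>
      #include <string>
      #include <thread>
      #include <vector>

      // Trivial placeholder worker: owns a thread bound to one device name.
      class DeviceThread {
       public:
        explicit DeviceThread(std::string device)
            : device_(std::move(device)), thread_([this] { Run(); }) {}
        ~DeviceThread() { thread_.join(); }

       private:
        void Run() {
          // A real worker would loop here, pulling ops for device_ off a
          // queue and executing them; this placeholder simply returns.
        }

        std::string device_;
        std::thread thread_;
      };

      int main() {
        std::vector<std::string> devices = {"/device:CPU:0", "/device:CPU:1"};

        std::vector<std::unique_ptr<DeviceThread>> device_threads;
        device_threads.reserve(devices.size());
        // One long-lived worker per underlying device.
        for (std::size_t i = 0; i < devices.size(); ++i) {
          device_threads.emplace_back(new DeviceThread(devices[i]));
        }
        return 0;
      }
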