Results 1 - 7 of 7 for Collective (0.39 sec)

  1. tensorflow/c/eager/parallel_device/parallel_device_lib_test.cc

      std::unique_ptr<ParallelTensor> run_collective =
          parallel_device.ScalarsFromSequence<bool>({true, true}, context.get(),
                                                    status.get());
      ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
      auto outputs = parallel_device.Execute(
          context.get(), {reduced_values.get(), run_collective.get()},
          "AssertAndCollective", TFE_OpGetAttrs(call_op.get()),
    - Language: C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Jul 08 23:47:35 GMT 2021
    - 15.3K bytes
    - Viewed (0)
  2. tensorflow/c/c_api_experimental.cc

                                                      TF_Status* status) {
      tensorflow::EagerContext* context =
          tensorflow::ContextFromInterface(tensorflow::unwrap(ctx));
      auto collective_executor_handle = context->GetCollectiveExecutorHandle();
      collective_executor_handle->get()->StartAbort(status->status);
    }
    
    TF_CAPI_EXPORT extern void TFE_CollectiveOpsCheckPeerHealth(
        TFE_Context* ctx, const char* task, int64_t timeout_in_ms,
    - Language: C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 29.4K bytes
    - Viewed (0)
  3. tensorflow/c/eager/parallel_device/parallel_device.cc

            // just copy-off but includes a sum) and consideration of performance.
            //
            // TODO(allenl): There may be smarter ways to do this copy in some
            // cases, i.e. with a collective broadcast. We'll need to be careful
            // about things that are taken as inputs on the host or on their
            // existing device (for multi-device functions).
            std::unique_ptr<ParallelTensor> parallel_tensor(
    - Language: C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Mar 29 22:05:31 GMT 2023
    - 18.3K bytes
    - Viewed (0)
  4. tensorflow/c/eager/parallel_device/parallel_device_test.cc

      TensorHandlePtr parallel_value = CreatePerDeviceValues(
          context.get(), components, device_name, status.get());
      ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
    
      // Run a collective sum, so each component should now be the same.
      TensorHandlePtr reduced(
          CollectiveSum(context.get(), parallel_value.get(), 2, status.get()));
      ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
    - Language: C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Jul 08 23:47:35 GMT 2021
    - 29.3K bytes
    - Viewed (1)
  5. tensorflow/c/eager/parallel_device/parallel_device_lib.cc

                            const int in_flight_nodes_limit)
          : status_(TF_NewStatus()),
            // If the context's default executor is set to async, re-using that in
            // each thread would cause collectives to deadlock. For consistency we
            // create a new sync executor for every thread.
            //
            // TODO(allenl): We should have an async API that works with the
            // parallel device.
    - Language: C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Feb 09 07:47:20 GMT 2024
    - 25.4K bytes
    - Viewed (1)
  6. tensorflow/c/eager/c_api_test_util.cc

        job_def->mutable_tasks()->insert(
            {i, tensorflow::strings::StrCat("localhost:", port)});
      }
      auto* config = server_def.mutable_default_session_config();
      config->mutable_experimental()->set_collective_group_leader(
          tensorflow::strings::StrCat("/job:", job_name, "/replica:0/task:", 0));
      auto* rewrite_options =
          config->mutable_graph_options()->mutable_rewrite_options();
    - Language: C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Feb 21 22:37:46 GMT 2024
    - 23.5K bytes
    - Viewed (2)
  7. tensorflow/c/eager/c_api.cc

              opts->device_placement_policy),
          opts->async, device_mgr.release(),
          /*device_mgr_owned*/ true, std::move(r),
          /*cluster_flr=*/nullptr,
          /*collective_executor_mgr=*/nullptr,
          /*run_eager_op_as_function=*/opts->run_eager_op_as_function,
          /*jit_compile_rewrite=*/opts->jit_compile_rewrite);
    #if !defined(IS_MOBILE_PLATFORM)
      eager_context->SetDistributedManager(
    - Language: C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Mar 12 20:00:09 GMT 2024
    - 43.9K bytes
    - Viewed (2)
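
The TFE_CollectiveOpsCheckPeerHealth declaration excerpted in result 2 is cut off after its timeout parameter; the full experimental C API also takes a TF_Status* out-parameter that reports whether the probed task responded. Below is a minimal usage sketch, assuming the function is declared in tensorflow/c/c_api_experimental.h; the PeerIsHealthy wrapper name, the 5-second timeout, and the task string in the trailing comment are illustrative and not part of the indexed sources.

    #include <cstdio>

    #include "tensorflow/c/c_api_experimental.h"  // assumed header for the declaration
    #include "tensorflow/c/eager/c_api.h"
    #include "tensorflow/c/tf_status.h"

    // Probes a remote task and reports whether it responded within the timeout.
    bool PeerIsHealthy(TFE_Context* ctx, const char* task) {
      TF_Status* status = TF_NewStatus();
      TFE_CollectiveOpsCheckPeerHealth(ctx, task, /*timeout_in_ms=*/5000, status);
      const bool healthy = TF_GetCode(status) == TF_OK;
      if (!healthy) {
        std::fprintf(stderr, "Peer %s unhealthy: %s\n", task, TF_Message(status));
      }
      TF_DeleteStatus(status);
      return healthy;
    }

    // Example call (task name format follows the group-leader string in result 6):
    //   PeerIsHealthy(ctx, "/job:worker/replica:0/task:1");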
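Results 1 and 4 both drive collectives through a CollectiveSum test helper that reduces a value across a group of participants. The sketch below shows how such a helper can be assembled from the eager C API in tensorflow/c/eager/c_api.h by executing a "CollectiveReduce" op directly; the TensorHandlePtr alias and the group_key/instance_key constants are illustrative assumptions rather than the exact helper in c_api_test_util.cc, and real callers must keep those keys consistent across every participant in the group.

    #include <memory>
    #include <string>

    #include "tensorflow/c/eager/c_api.h"

    // Smart-pointer alias in the spirit of the TensorHandlePtr used by the tests above.
    using TensorHandlePtr =
        std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)>;

    // Sums `input` across `group_size` participants with a CollectiveReduce op.
    TensorHandlePtr CollectiveSum(TFE_Context* context, TFE_TensorHandle* input,
                                  int group_size, TF_Status* status) {
      std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op(
          TFE_NewOp(context, "CollectiveReduce", status), TFE_DeleteOp);
      if (TF_GetCode(status) != TF_OK) return {nullptr, TFE_DeleteTensorHandle};

      // Place the op on the same device as its input.
      const char* device = TFE_TensorHandleDeviceName(input, status);
      if (TF_GetCode(status) != TF_OK) return {nullptr, TFE_DeleteTensorHandle};
      TFE_OpSetDevice(op.get(), device, status);
      if (TF_GetCode(status) != TF_OK) return {nullptr, TFE_DeleteTensorHandle};

      TFE_OpSetAttrType(op.get(), "T", TFE_TensorHandleDataType(input));
      TFE_OpSetAttrInt(op.get(), "group_size", group_size);
      TFE_OpSetAttrInt(op.get(), "group_key", 0);     // illustrative key
      TFE_OpSetAttrInt(op.get(), "instance_key", 0);  // illustrative key
      const std::string merge_op("Add");
      TFE_OpSetAttrString(op.get(), "merge_op", merge_op.c_str(),
                          merge_op.length());
      const std::string final_op("Id");
      TFE_OpSetAttrString(op.get(), "final_op", final_op.c_str(),
                          final_op.length());
      TFE_OpSetAttrIntList(op.get(), "subdiv_offsets", nullptr, 0);

      TFE_OpAddInput(op.get(), input, status);
      if (TF_GetCode(status) != TF_OK) return {nullptr, TFE_DeleteTensorHandle};

      TFE_TensorHandle* result_handle = nullptr;
      int num_retvals = 1;
      TFE_Execute(op.get(), &result_handle, &num_retvals, status);
      return {result_handle, TFE_DeleteTensorHandle};
    }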
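Result 6 builds a multi-task ServerDef whose default session config names task 0 as the collective group leader, which is what lets collective ops from the other excerpts rendezvous across tasks. A condensed sketch of that configuration follows, assuming the standard ServerDef/ClusterDef/ConfigProto protos under tensorflow/core/protobuf; the "worker" job name, the two-task cluster, and the localhost ports are illustrative.

    #include "tensorflow/core/protobuf/cluster.pb.h"
    #include "tensorflow/core/protobuf/config.pb.h"
    #include "tensorflow/core/protobuf/tensorflow_server.pb.h"

    // Builds a two-task ServerDef whose default session config points all
    // collectives at task 0 of the (illustrative) "worker" job.
    tensorflow::ServerDef MakeCollectiveServerDef(int task_index) {
      tensorflow::ServerDef server_def;
      server_def.set_protocol("grpc");
      server_def.set_job_name("worker");
      server_def.set_task_index(task_index);

      tensorflow::JobDef* job_def = server_def.mutable_cluster()->add_job();
      job_def->set_name("worker");
      job_def->mutable_tasks()->insert({0, "localhost:2222"});  // illustrative ports
      job_def->mutable_tasks()->insert({1, "localhost:2223"});

      // Every task must agree on the same group leader for collectives to complete.
      auto* config = server_def.mutable_default_session_config();
      config->mutable_experimental()->set_collective_group_leader(
          "/job:worker/replica:0/task:0");
      return server_def;
    }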