- Sort by: Score
- Results per page: 10
- Languages All
Results 1 - 10 of 49 for stream_executors (0.2 sec)
-
tensorflow/compiler/jit/xla_device_compiler_client.cc
return client_->Load(serialized_executable, build_options); } void XlaDeviceCompilerClient::WaitForProgramsToFinish() { if (client_ == nullptr) return; for (auto* executor : client_->backend().stream_executors()) { bool ok = executor->SynchronizeAllActivity(); if (!ok) { LOG(ERROR) << "Error synchronizing activity while waiting for all " "programs to complete"; } } }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 4.1K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.cc
if (it != state.allocators_.end()) { return it->second.get(); } std::unique_ptr<XlaDeviceAllocator> alloc = std::make_unique<XlaDeviceAllocator>( backend->stream_executors()[device_ordinal]); XlaDeviceAllocator* alloc_ptr = alloc.get(); state.allocators_[{backend, device_ordinal}] = std::move(alloc); return alloc_ptr; } namespace {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/stream_executor.cc
#include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h" #include "tensorflow/c/tf_status_helper.h" #include "xla/stream_executor/executor_cache.h" #include "xla/stream_executor/host_memory_allocation.h" #include "xla/stream_executor/memory_allocation.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jun 14 07:39:19 UTC 2024 - 27.1K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/stream_executor_internal.h
#include "xla/stream_executor/executor_cache.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/stream_common.h" #include "xla/stream_executor/stream_executor.h" #include "tsl/platform/statusor.h" namespace stream_executor { // Plugin initialization function that a device plugin // must define. typedef void (*SEInitPluginFn)(SE_PlatformRegistrationParams* const, TF_Status* const);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jun 14 07:39:19 UTC 2024 - 8K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/stream_executor.h
size_t struct_size; void* ext; // reserved for future use SP_StreamExecutor* stream_executor; // output, to be filled by plugin } SE_CreateStreamExecutorParams; #define SE_CREATE_STREAM_EXECUTOR_PARAMS_STRUCT_SIZE \ TF_OFFSET_OF_END(SE_CreateStreamExecutorParams, stream_executor) typedef struct SP_Platform { size_t struct_size; void* ext; // free-form data set by plugin
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Aug 24 08:40:35 UTC 2022 - 21.6K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device_context.h
bool ClearStats() override; private: // The stream executor of the device. se::StreamExecutor* stream_executor_; }; // Helper class for managing data transfers between host and XLA devices. class XlaDeviceContext : public DeviceContext { public: explicit XlaDeviceContext( std::shared_ptr<se::Stream> compute_stream, std::shared_ptr<se::Stream> host_to_device_stream,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 5.1K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device_context.cc
#include "tsl/platform/statusor.h" namespace tensorflow { // The allocator used for Tensors assigned to the XLA device. XlaDeviceAllocator::XlaDeviceAllocator( stream_executor::StreamExecutor* stream_executor) : stream_executor_(stream_executor) {} XlaDeviceAllocator::~XlaDeviceAllocator() = default; std::string XlaDeviceAllocator::Name() { return "xla"; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 12.7K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/stream_executor_test.cc
#include "tensorflow/c/experimental/stream_executor/stream_executor.h" #include <functional> #include <utility> #include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h" #include "tensorflow/c/experimental/stream_executor/stream_executor_test_util.h" #include "xla/stream_executor/event.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 19:54:04 UTC 2024 - 26.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_send_recv_device_context_test.cc
#include "tensorflow/compiler/tf2xla/shape_util.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h" #include "xla/stream_executor/stream_executor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tsl/lib/core/status_test_util.h" namespace tensorflow { namespace {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/BUILD
) cc_library( name = "stream_executor_hdrs", hdrs = ["stream_executor.h"], visibility = ["//tensorflow:internal"], deps = [ "//tensorflow/c:c_api_macros_hdrs", "//tensorflow/c:tf_status_headers", ], ) cc_library( name = "stream_executor", srcs = ["stream_executor.cc"], hdrs = ["stream_executor.h"], visibility = ["//tensorflow:internal"], deps = [
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 08 00:27:07 UTC 2024 - 3.1K bytes - Viewed (0)