- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 24 for stream_executors (0.34 sec)
-
tensorflow/compiler/jit/xla_device.cc
if (it != state.allocators_.end()) { return it->second.get(); } std::unique_ptr<XlaDeviceAllocator> alloc = std::make_unique<XlaDeviceAllocator>( backend->stream_executors()[device_ordinal]); XlaDeviceAllocator* alloc_ptr = alloc.get(); state.allocators_[{backend, device_ordinal}] = std::move(alloc); return alloc_ptr; } namespace {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/stream_executor.cc
#include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h" #include "tensorflow/c/tf_status_helper.h" #include "xla/stream_executor/executor_cache.h" #include "xla/stream_executor/host_memory_allocation.h" #include "xla/stream_executor/memory_allocation.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jun 14 07:39:19 UTC 2024 - 27.1K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/stream_executor_internal.h
#include "xla/stream_executor/executor_cache.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/stream_common.h" #include "xla/stream_executor/stream_executor.h" #include "tsl/platform/statusor.h" namespace stream_executor { // Plugin initialization function that a device plugin // must define. typedef void (*SEInitPluginFn)(SE_PlatformRegistrationParams* const, TF_Status* const);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jun 14 07:39:19 UTC 2024 - 8K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device_context.cc
#include "tsl/platform/statusor.h" namespace tensorflow { // The allocator used for Tensors assigned to the XLA device. XlaDeviceAllocator::XlaDeviceAllocator( stream_executor::StreamExecutor* stream_executor) : stream_executor_(stream_executor) {} XlaDeviceAllocator::~XlaDeviceAllocator() = default; std::string XlaDeviceAllocator::Name() { return "xla"; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 12.7K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/stream_executor_test.cc
#include "tensorflow/c/experimental/stream_executor/stream_executor.h" #include <functional> #include <utility> #include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h" #include "tensorflow/c/experimental/stream_executor/stream_executor_test_util.h" #include "xla/stream_executor/event.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 19:54:04 UTC 2024 - 26.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_send_recv_device_context_test.cc
#include "tensorflow/compiler/tf2xla/shape_util.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h" #include "xla/stream_executor/stream_executor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tsl/lib/core/status_test_util.h" namespace tensorflow { namespace {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/BUILD
) cc_library( name = "stream_executor_hdrs", hdrs = ["stream_executor.h"], visibility = ["//tensorflow:internal"], deps = [ "//tensorflow/c:c_api_macros_hdrs", "//tensorflow/c:tf_status_headers", ], ) cc_library( name = "stream_executor", srcs = ["stream_executor.cc"], hdrs = ["stream_executor.h"], visibility = ["//tensorflow:internal"], deps = [
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 08 00:27:07 UTC 2024 - 3.1K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info.cc
.ok()) { // If we are on an XlaDevice, use the underlying XLA platform's allocator // directly. We could use the StreamExecutor's allocator which may // theoretically be more correct, but XLA returns a nice OOM message in a // Status and StreamExecutor does not. // // Importantly we can't use ctx->device()->GetAllocator() as the allocator
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 17:23:27 UTC 2024 - 17.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_registration.cc
#include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/client_library.h" #include "xla/client/compile_only_client.h" #include "xla/stream_executor/host/host_platform_id.h" #include "xla/stream_executor/platform_manager.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/graph.pb.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 22:19:26 UTC 2024 - 7.8K bytes - Viewed (0) -
tensorflow/compiler/jit/BUILD
"@local_tsl//tsl/platform:statusor", "@local_xla//xla/stream_executor/tpu:c_api_conversions", "@local_xla//xla/stream_executor/tpu:status_helper", "@local_xla//xla/stream_executor/tpu:tpu_api", "@local_xla//xla/stream_executor/tpu:tpu_executor_base", "@local_xla//xla/stream_executor/tpu:tpu_node_context", "@local_xla//xla/stream_executor/tpu:tpu_platform_interface",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 00:41:19 UTC 2024 - 61.5K bytes - Viewed (0)