- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 24 for stream_executors (0.21 sec)
-
tensorflow/compiler/jit/xla_device.cc
if (it != state.allocators_.end()) { return it->second.get(); } std::unique_ptr<XlaDeviceAllocator> alloc = std::make_unique<XlaDeviceAllocator>( backend->stream_executors()[device_ordinal]); XlaDeviceAllocator* alloc_ptr = alloc.get(); state.allocators_[{backend, device_ordinal}] = std::move(alloc); return alloc_ptr; } namespace {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/stream_executor.cc
#include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h" #include "tensorflow/c/tf_status_helper.h" #include "xla/stream_executor/executor_cache.h" #include "xla/stream_executor/host_memory_allocation.h" #include "xla/stream_executor/memory_allocation.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jun 14 07:39:19 UTC 2024 - 27.1K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/stream_executor.h
size_t struct_size; void* ext; // reserved for future use SP_StreamExecutor* stream_executor; // output, to be filled by plugin } SE_CreateStreamExecutorParams; #define SE_CREATE_STREAM_EXECUTOR_PARAMS_STRUCT_SIZE \ TF_OFFSET_OF_END(SE_CreateStreamExecutorParams, stream_executor) typedef struct SP_Platform { size_t struct_size; void* ext; // free-form data set by plugin
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Aug 24 08:40:35 UTC 2022 - 21.6K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device_context.cc
#include "tsl/platform/statusor.h" namespace tensorflow { // The allocator used for Tensors assigned to the XLA device. XlaDeviceAllocator::XlaDeviceAllocator( stream_executor::StreamExecutor* stream_executor) : stream_executor_(stream_executor) {} XlaDeviceAllocator::~XlaDeviceAllocator() = default; std::string XlaDeviceAllocator::Name() { return "xla"; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 12.7K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/stream_executor_test.cc
#include "tensorflow/c/experimental/stream_executor/stream_executor.h" #include <functional> #include <utility> #include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h" #include "tensorflow/c/experimental/stream_executor/stream_executor_test_util.h" #include "xla/stream_executor/event.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 19:54:04 UTC 2024 - 26.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info.cc
.ok()) { // If we are on an XlaDevice, use the underlying XLA platform's allocator // directly. We could use the StreamExecutor's allocator which may // theoretically be more correct, but XLA returns a nice OOM message in a // Status and StreamExecutor does not. // // Importantly we can't use ctx->device()->GetAllocator() as the allocator
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 17:23:27 UTC 2024 - 17.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_tpu_device.cc
#include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/stream_executor/tpu/c_api_conversions.h" #include "xla/stream_executor/tpu/status_helper.h" #include "xla/stream_executor/tpu/tpu_api.h" #include "xla/stream_executor/tpu/tpu_node_context.h" #include "xla/stream_executor/tpu/tpu_platform.h" #include "xla/stream_executor/tpu/tpu_platform_interface.h" #include "xla/stream_executor/tpu/tpu_stream_interface.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 22:53:47 UTC 2024 - 20.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/BUILD
"@local_xla//xla/service/cpu:cpu_compiler", "@local_xla//xla/service/cpu:cpu_transfer_manager", "@local_xla//xla/stream_executor", "@local_xla//xla/stream_executor/host:host_platform", "@local_xla//xla/stream_executor/host:host_platform_id", ], alwayslink = 1, ) cc_library( name = "split_into_island_per_op_pass",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 22:19:26 UTC 2024 - 11K bytes - Viewed (0) -
tensorflow/c/kernels.h
// OpKernelContext routines // TF_GetStream returns the SP_Stream available in ctx. // This function returns a stream only for devices registered using the // StreamExecutor C API // (tensorflow/c/experimental/stream_executor/stream_executor.h). It will return // nullptr and set error status in all other cases. // Experimental: this function doesn't have compatibility guarantees and subject // to change at any time.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 09 22:46:22 UTC 2024 - 24.6K bytes - Viewed (0) -
tensorflow/c/kernels.cc
#include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "tensorflow/c/c_api.h" #include "tensorflow/c/c_api_internal.h" #include "tensorflow/c/c_api_macros.h" #include "tensorflow/c/experimental/stream_executor/stream_executor.h" #include "tensorflow/c/tf_buffer.h" #include "tensorflow/c/tf_buffer_internal.h" #include "tensorflow/c/tf_datatype.h" #include "tensorflow/c/tf_status.h" #include "tensorflow/c/tf_status_helper.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 22:53:47 UTC 2024 - 36K bytes - Viewed (0)