Results 1 - 5 of 5 for xla_allocator_ (0.45 sec)
tensorflow/compiler/jit/xla_device.cc
  if (attr.on_host()) {
    return cpu_allocator();
  }
  if (xla_allocator_ == nullptr) {
    if (UsePjRtForSingleDeviceCompilation(device_name_)) {
      VLOG(1) << "XlaDevice " << this << " uses AsyncValueAllocator";
      pjrt_allocator_ = std::make_unique<AsyncValueAllocator>();
      xla_allocator_ = pjrt_allocator_.get();
    } else {
Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes
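For context, a minimal standalone sketch of the lazy allocator-selection pattern the xla_device.cc snippet shows: cache the chosen allocator on first use, preferring the PjRt AsyncValueAllocator when single-device PjRt compilation is enabled. Device, AsyncValueAllocator, and FallbackAllocator here are simplified stand-ins, not the TensorFlow classes:

#include <iostream>
#include <memory>

// Stand-ins for TensorFlow's Allocator hierarchy (hypothetical).
struct Allocator { virtual ~Allocator() = default; };
struct AsyncValueAllocator : Allocator {};
struct FallbackAllocator : Allocator {};

class Device {
 public:
  explicit Device(bool use_pjrt) : use_pjrt_(use_pjrt) {}

  // Lazily create the allocator on first call; later calls return the
  // cached pointer.
  Allocator* GetAllocator() {
    if (xla_allocator_ == nullptr) {
      if (use_pjrt_) {
        pjrt_allocator_ = std::make_unique<AsyncValueAllocator>();
        xla_allocator_ = pjrt_allocator_.get();
      } else {
        fallback_allocator_ = std::make_unique<FallbackAllocator>();
        xla_allocator_ = fallback_allocator_.get();
      }
    }
    return xla_allocator_;
  }

 private:
  bool use_pjrt_;
  Allocator* xla_allocator_ = nullptr;  // Not owned; aliases one field below.
  std::unique_ptr<AsyncValueAllocator> pjrt_allocator_;
  std::unique_ptr<FallbackAllocator> fallback_allocator_;
};

int main() {
  Device d(/*use_pjrt=*/true);
  // Prints "true": the second call returns the cached allocator.
  std::cout << std::boolalpha << (d.GetAllocator() == d.GetAllocator()) << "\n";
}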
tensorflow/compiler/jit/xla_launch_util.h
      const xla::HloInputOutputAliasConfig& input_output_alias,
      const std::map<int, const Tensor*>& resource_vars);

 private:
  xla::LocalClient* client_;
  se::DeviceMemoryAllocator* xla_allocator_;
  bool allocate_xla_tensors_;
  bool use_multiple_streams_;
  int device_ordinal_;
};

// A simple TensorBuffer implementation that allows us to create Tensors that
Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 11.8K bytes
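The truncated comment above refers to a simple TensorBuffer subclass. As an illustration only, here is a standalone sketch of that wrapping pattern; TensorBuffer below is a hand-rolled stand-in for tensorflow::TensorBuffer, whose real interface also includes methods such as root_buffer() and FillAllocationDescription():

#include <cstddef>
#include <cstdlib>
#include <iostream>

// Hypothetical stand-in: a buffer interface over a raw pointer.
class TensorBuffer {
 public:
  explicit TensorBuffer(void* data) : data_(data) {}
  virtual ~TensorBuffer() = default;
  void* data() const { return data_; }
  virtual size_t size() const = 0;

 private:
  void* data_;
};

// Sketch of the "simple TensorBuffer implementation" idea: wrap externally
// allocated memory so a Tensor could be created over it without copying.
class WrappedBuffer : public TensorBuffer {
 public:
  WrappedBuffer(void* data, size_t size) : TensorBuffer(data), size_(size) {}
  size_t size() const override { return size_; }

 private:
  size_t size_;
};

int main() {
  void* raw = std::malloc(64);
  WrappedBuffer buf(raw, 64);
  std::cout << buf.size() << "\n";  // Prints 64; the buffer does not own raw.
  std::free(raw);
}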
tensorflow/compiler/jit/xla_device.h
  se::Platform* const platform_;  // Not owned.

  // Intra-op threads to spawn (from SessionOptions).
  const int intra_op_parallelism_threads_;

  // Memory allocator associated with this device.
  Allocator* xla_allocator_ TF_GUARDED_BY(mu_) = nullptr;  // Not owned.
  std::unique_ptr<AsyncValueAllocator> pjrt_allocator_ TF_GUARDED_BY(mu_);

  // Stream associated with this device. Operations enqueued on this
Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes
tensorflow/compiler/jit/xla_launch_util.cc
}

XlaComputationLaunchContext::XlaComputationLaunchContext(
    xla::LocalClient* client, se::DeviceMemoryAllocator* xla_allocator,
    int device_ordinal, bool allocate_xla_tensors, bool use_multiple_streams)
    : client_(client),
      xla_allocator_(xla_allocator),
      allocate_xla_tensors_(allocate_xla_tensors),
      use_multiple_streams_(use_multiple_streams),
      device_ordinal_(device_ordinal) {
Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes
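A minimal standalone sketch of the constructor pattern above: the launch context simply captures the client, the allocator, and the launch options in its member initializer list. LocalClient and DeviceMemoryAllocator are stand-in structs here, not the xla/StreamExecutor types:

#include <iostream>

// Hypothetical stand-ins for xla::LocalClient and se::DeviceMemoryAllocator.
struct LocalClient {};
struct DeviceMemoryAllocator {};

class LaunchContext {
 public:
  // Mirrors the parameter and initializer order of the snippet above.
  LaunchContext(LocalClient* client, DeviceMemoryAllocator* xla_allocator,
                int device_ordinal, bool allocate_xla_tensors,
                bool use_multiple_streams)
      : client_(client),
        xla_allocator_(xla_allocator),
        allocate_xla_tensors_(allocate_xla_tensors),
        use_multiple_streams_(use_multiple_streams),
        device_ordinal_(device_ordinal) {}

  int device_ordinal() const { return device_ordinal_; }

 private:
  LocalClient* client_;
  DeviceMemoryAllocator* xla_allocator_;
  bool allocate_xla_tensors_;
  bool use_multiple_streams_;
  int device_ordinal_;
};

int main() {
  LocalClient client;
  DeviceMemoryAllocator allocator;
  LaunchContext ctx(&client, &allocator, /*device_ordinal=*/0,
                    /*allocate_xla_tensors=*/true,
                    /*use_multiple_streams=*/false);
  std::cout << ctx.device_ordinal() << "\n";  // Prints 0.
}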
tensorflow/compiler/jit/xla_platform_info.cc
// theoretically be more correct, but XLA returns a nice OOM message in a
// Status and StreamExecutor does not.
//
// Importantly we can't use ctx->device()->GetAllocator() as the allocator
// (which xla_allocator above uses) as on an XlaDevice, this is a dummy
// allocator that returns XlaTensor objects. The XlaCompiler needs a real
// allocator to allocate real buffers.
platform_id = xla_device_metadata->platform()->id();
Last Modified: Thu May 02 17:23:27 UTC 2024 - 17.4K bytes
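A hedged sketch of the point the xla_platform_info.cc comment makes: on an XlaDevice the device allocator is a dummy, so compilation must be handed a separate real allocator. Every type and the PickCompilerAllocator helper below are hypothetical illustrations, not TensorFlow code:

#include <iostream>

struct Allocator {
  virtual const char* Name() const = 0;
  virtual ~Allocator() = default;
};

// Dummy device allocator that would hand back XlaTensor-style wrappers.
struct XlaDeviceDummyAllocator : Allocator {
  const char* Name() const override { return "xla-dummy"; }
};

// Real backing allocator that hands out actual device buffers.
struct RealDeviceAllocator : Allocator {
  const char* Name() const override { return "real"; }
};

// Mirror of the selection the comment describes: never give the compiler the
// XlaDevice's dummy allocator; use the real one instead.
Allocator* PickCompilerAllocator(Allocator* device_allocator,
                                 Allocator* real_allocator,
                                 bool is_xla_device) {
  return is_xla_device ? real_allocator : device_allocator;
}

int main() {
  XlaDeviceDummyAllocator dummy;
  RealDeviceAllocator real;
  std::cout << PickCompilerAllocator(&dummy, &real, /*is_xla_device=*/true)->Name()
            << "\n";  // Prints "real".
}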