- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 3 of 3 for xla_allocator_ (0.11 sec)
-
tensorflow/compiler/jit/xla_device.cc
if (attr.on_host()) { return cpu_allocator(); } if (xla_allocator_ == nullptr) { if (UsePjRtForSingleDeviceCompilation(device_name_)) { VLOG(1) << "XlaDevice " << this << " uses AsyncValueAllocator"; pjrt_allocator_ = std::make_unique<AsyncValueAllocator>(); xla_allocator_ = pjrt_allocator_.get(); } else {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.cc
} XlaComputationLaunchContext::XlaComputationLaunchContext( xla::LocalClient* client, se::DeviceMemoryAllocator* xla_allocator, int device_ordinal, bool allocate_xla_tensors, bool use_multiple_streams) : client_(client), xla_allocator_(xla_allocator), allocate_xla_tensors_(allocate_xla_tensors), use_multiple_streams_(use_multiple_streams), device_ordinal_(device_ordinal) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info.cc
// theoretically be more correct, but XLA returns a nice OOM message in a // Status and StreamExecutor does not. // // Importantly we can't use ctx->device()->GetAllocator() as the allocator // (which xla_allocator above uses) as on an XlaDevice, this is a dummy // allocator that returns XlaTensor objects. The XlaCompiler needs a real // allocator to allocate real buffers. platform_id = xla_device_metadata->platform()->id();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 17:23:27 UTC 2024 - 17.4K bytes - Viewed (0)