- Sort Score
- Results per page: 10
- Languages All
Results 1 - 4 of 4 for AllocateShapedBuffer (0.2 sec)
-
tensorflow/compiler/jit/xla_tensor.h
// Assign the internal ShapedBuffer to new memory for the given dtype and // shape. If a ShapedBuffer exists already (has_shaped_buffer() == true), it // is replaced and the managed memory deallocated. Status AllocateShapedBuffer(DataType dtype, const xla::Shape& on_device_shape, xla::LocalClient* client, int device_ordinal); // Some Tensors can have complex on-device shapes, including tuple shapes. To
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 4.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_tensor.cc
} else { return se::DeviceMemoryBase(const_cast<char*>(tensor.tensor_data().data()), tensor.tensor_data().size()); } } Status XlaTensor::AllocateShapedBuffer(DataType dtype, const xla::Shape& on_device_shape, xla::LocalClient* client, int device_ordinal) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device_context.cc
// The device tensor should always be fresh. TF_RET_CHECK(!xla_tensor->has_shaped_buffer()); TF_RETURN_IF_ERROR( xla_tensor->AllocateShapedBuffer(device_tensor->dtype(), shape, client_, stream_->parent()->device_ordinal())); // The cpu_tensor and literal that we created here hold the data of host
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 12.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_tpu_device.cc
input->shape(), input->dtype(), /*use_fast_memory=*/false, layout_preference)); TF_RETURN_IF_ERROR(xla_output->AllocateShapedBuffer( input->dtype(), shape, dst_xla_context->client(), dst_device_ordinal)); VLOG(2) << "TpuDeviceToDeviceCopy: src: " << src_compute_stream->parent()->device_ordinal() << ", "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 22:53:47 UTC 2024 - 20.9K bytes - Viewed (0)