- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 12 for device_vendor (0.22 sec)
-
tensorflow/c/experimental/stream_executor/stream_executor_test.cc
SE_CreateDeviceParams* params, TF_Status* status) { params->device->hardware_name = hardware_name; params->device->device_vendor = vendor; params->device->pci_bus_id = pci_bus_id; }; device_fns_.get_numa_node = [](const SP_Device* device) { return 123; }; device_fns_.get_memory_bandwidth = [](const SP_Device* device) -> int64_t {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 19:54:04 UTC 2024 - 26.5K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/stream_executor.cc
internal::DeviceDescriptionBuilder builder; if (device_.hardware_name != nullptr) { builder.set_name(device_.hardware_name); } if (device_.device_vendor != nullptr) { builder.set_device_vendor(device_.device_vendor); } if (device_.pci_bus_id != nullptr) { builder.set_pci_bus_id(device_.pci_bus_id); } if (device_fns_->get_numa_node != nullptr) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jun 14 07:39:19 UTC 2024 - 27.1K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_send_device_context.h
// Copies 'cpu_tensor' to `device_memory_base_` with `shape_`. // `device_tensor` is unused. void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device, Tensor* device_tensor, StatusCallback done, bool sync_dst_compute) const override; void CopyDeviceTensorToCPU(const Tensor* device_tensor, StringPiece tensor_name, Device* device,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 3.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_send_recv_device_context_test.cc
TEST_F(XlaHostSendRecvDeviceContextTest, CopyDeviceTensorToCPU) { SetDevice("GPU"); Tensor origin_cpu_tensor(host_allocator_, DT_FLOAT, TensorShape({2, 2})); test::FillValues<float>(&origin_cpu_tensor, {1.2, 2.3, 3.4, 4.5}); Tensor device_tensor(device_allocator_, DT_FLOAT, TensorShape({2, 2})); Tensor dest_cpu_tensor(host_allocator_, DT_FLOAT, TensorShape({2, 2})); stream_executor::Platform* platform =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device_context.cc
device_tensor->shape(), device_tensor->dtype(), std::nullopt); Status status = [&]() -> Status { TF_ASSIGN_OR_RETURN(xla::Shape shape, shape_determination_fns_.shape_representation_fn( device_tensor->shape(), device_tensor->dtype(), /*fast_mem=*/false, layout_preference)); // The device tensor should always be fresh.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 12.7K bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_device_context.cc
// tensor. absl::StatusOr<Tensor> t = MakeTensorFromPjRtBuffer( device_tensor->dtype(), device_tensor->shape(), std::move(*buffer_or)); if (!t.ok()) { done(t.status()); return; } *device_tensor = *t; } else { AsyncValueTensor* result_tensor = tensorflow::AsyncValueTensor::FromTensor(device_tensor); // The result tensor should be newly allocated, which does not point to a
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 08:49:31 UTC 2024 - 11.6K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_recv_device_context.h
Tensor* device_tensor, StatusCallback done, bool sync_dst_compute) const override { done(errors::Internal("host->device copy not implemented.")); } // Copies `device_memory_base_` with `shape_` into `cpu_tensor`. // `device_tensor` is unused. void CopyDeviceTensorToCPU(const Tensor* device_tensor,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 3.9K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util_test.cc
Tensor* host_tensor = CreateHostTensor<T>(shape, data); Tensor* device_tensor = new Tensor(device_allocator_, DataTypeToEnum<T>::v(), shape); TF_EXPECT_OK(device_context_->CopyCPUTensorToDeviceSync( host_tensor, device_, device_tensor)); tensors_.push_back(device_tensor); return device_tensor; } // Gets the `output_index`-th output set in the context_
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 28.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/ir/gpu_ops.td
// GPU MIG. def TransferToDeviceOp: Gpu_Op<"transfer_to_device"> { let summary = "Transfer a CPU tensor to device."; let description = [{ Transfer a CPU tensor to device. Example: %device_tensor = gpurt.transfer_to_device %cpu_tensor }]; let arguments = (ins TFTensorType); let results = (outs TFTensorType); let assemblyFormat = "operands attr-dict"; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 15:01:21 UTC 2024 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_send_device_context.cc
#include "tensorflow/compiler/tf2xla/type_util.h" namespace tensorflow { void XlaHostSendDeviceContext::CopyCPUTensorToDevice( const Tensor* cpu_tensor, Device* device, Tensor* device_tensor, StatusCallback done, bool sync_dst_compute) const { auto status = stream_->Memcpy(device_memory_base_, cpu_tensor->data(), device_memory_base_->size()); if (!status.ok()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 1.6K bytes - Viewed (0)