Results 1 - 10 of 12 for device_allocator_ (0.2 sec)
tensorflow/compiler/jit/xla_platform_info.h
// then device_allocator_ is the xla::Backend's memory allocator. If the op
// is placed on a regular CPU or GPU device then device_allocator_ is null.
// The allocator is of unknown provenance; keep it in a shared pointer to
// set an artificial refcount of one.
std::shared_ptr<se::DeviceMemoryAllocator> device_allocator_;

XlaPlatformInfo(const XlaPlatformInfo&) = delete;
Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 7.2K bytes
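The "artificial refcount of one" in this comment is the standard idiom of wrapping an unowned raw pointer in a std::shared_ptr with a no-op deleter. A minimal self-contained sketch of that idiom (the UnownedAllocator type and make_unowned helper are hypothetical stand-ins, not TensorFlow code):

#include <memory>

struct UnownedAllocator {};  // stand-in for se::DeviceMemoryAllocator

// Wrap a raw pointer of unknown provenance in a shared_ptr whose deleter
// does nothing: the control block holds a refcount of one, but destruction
// never touches the pointee.
std::shared_ptr<UnownedAllocator> make_unowned(UnownedAllocator* raw) {
  return std::shared_ptr<UnownedAllocator>(raw, [](UnownedAllocator*) {});
}

int main() {
  UnownedAllocator alloc;              // lifetime managed elsewhere
  auto shared = make_unowned(&alloc);  // copyable handle, no ownership
  // When the last copy of `shared` is destroyed, the no-op deleter runs
  // and `alloc` is left untouched.
}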
tensorflow/compiler/jit/device_context_test.cc
host_alloc_attr.set_on_host(true);
host_allocator_ = device_->GetAllocator(host_alloc_attr);

tensorflow::AllocatorAttributes device_alloc_attr;
device_alloc_attr.set_on_host(false);
device_allocator_ = device_->GetAllocator(device_alloc_attr);

tensorflow::DeviceContext* device_context;
auto status = device_->TryGetDeviceContext(&device_context);
TF_EXPECT_OK(status);
Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 3.7K bytes
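Spelled out, the allocator-selection pattern this test uses: the same Device hands out both allocators, and AllocatorAttributes::set_on_host is what picks the memory space. A sketch assuming a TensorFlow C++ test fixture whose members device_, host_allocator_, and device_allocator_ mirror the snippet:

// Sketch of the fixture's setup; the members are assumed as noted above.
tensorflow::AllocatorAttributes host_alloc_attr;
host_alloc_attr.set_on_host(true);  // request host-addressable memory
host_allocator_ = device_->GetAllocator(host_alloc_attr);

tensorflow::AllocatorAttributes device_alloc_attr;
device_alloc_attr.set_on_host(false);  // request device-resident memory
device_allocator_ = device_->GetAllocator(device_alloc_attr);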
tensorflow/compiler/jit/xla_host_send_recv_device_context_test.cc
    AllocatorAttributes device_alloc_attr;
    device_alloc_attr.set_on_host(false);
    device_allocator_ = device_->GetAllocator(device_alloc_attr);
  }

 protected:
  std::unique_ptr<Device> device_;
  Allocator* host_allocator_;
  Allocator* device_allocator_;
};

TEST_F(XlaHostSendRecvDeviceContextTest, CopyDeviceTensorToCPU) {
  SetDevice("GPU");
Last Modified: Fri May 17 22:46:36 UTC 2024 - 7.2K bytes
tensorflow/compiler/jit/xla_launch_util_gpu_test.cc
  return host_tensor;
}

// Creates a Tensor on device using the device_allocator_
template <typename T>
Tensor* CreateDeviceTensor(const TensorShape& shape,
                           const gtl::ArraySlice<T> data) {
  Tensor* host_tensor = CreateHostTensor<T>(shape, data);
  Tensor* device_tensor =
      new Tensor(device_allocator_, DataTypeToEnum<T>::v(), shape);
Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 10K bytes
tensorflow/compiler/jit/xla_launch_util_test.cc
  return host_tensor;
}

// Creates a Tensor on device using the device_allocator_
template <typename T>
Tensor* CreateDeviceTensor(const TensorShape& shape,
                           const gtl::ArraySlice<T> data) {
  Tensor* host_tensor = CreateHostTensor<T>(shape, data);
  Tensor* device_tensor =
      new Tensor(device_allocator_, DataTypeToEnum<T>::v(), shape);
Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 28.8K bytes
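Both xla_launch_util tests above share the same helper pair: stage values in a host tensor, then allocate a device tensor from device_allocator_ and copy the staged bytes over. A hedged reconstruction of the pair; only the lines quoted in the snippets are confirmed, the CreateHostTensor body and the trailing copy are assumptions:

// Assumed helper: allocate from the host allocator and fill with `data`.
template <typename T>
Tensor* CreateHostTensor(const TensorShape& shape,
                         const gtl::ArraySlice<T> data) {
  Tensor* host_tensor =
      new Tensor(host_allocator_, DataTypeToEnum<T>::v(), shape);
  test::FillValues<T>(host_tensor, data);
  return host_tensor;
}

// Creates a Tensor on device using the device_allocator_ (from the snippet).
template <typename T>
Tensor* CreateDeviceTensor(const TensorShape& shape,
                           const gtl::ArraySlice<T> data) {
  Tensor* host_tensor = CreateHostTensor<T>(shape, data);
  Tensor* device_tensor =
      new Tensor(device_allocator_, DataTypeToEnum<T>::v(), shape);
  // Assumed continuation: the snippet is truncated here; presumably the
  // staged host bytes are copied into device memory before returning.
  return device_tensor;
}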
tensorflow/compiler/jit/xla_compiler_options_util_test.cc
XlaPlatformInfo platform_info(
    compilation_device_type, /*platform_id=*/nullptr,
    /*xla_device_metadata=*/nullptr,
    /*pjrt_device_metadata=*/pjrt_device_metadata.get(),
    /*device_allocator=*/nullptr);
XlaCompiler::Options options = GenerateCompilerOptionsForPjRt(
    *device_setup_.flr(), device, platform_info,
    /*pjrt_device_compiler=*/nullptr);
Last Modified: Fri Dec 29 01:41:20 UTC 2023 - 14.8K bytes
tensorflow/compiler/jit/xla_platform_info_test.cc
    /*xla_device_metadata=*/nullptr, /*pjrt_device_metadata=*/nullptr,
    /*device_allocator=*/nullptr);
XlaDeviceCompiler* xla_device_compiler = nullptr;
TF_EXPECT_OK(BuildXlaDeviceCompiler(device, nullptr, platform_info,
                                    compilation_device_type,
Last Modified: Sun Jan 14 15:17:12 UTC 2024 - 13.6K bytes
tensorflow/compiler/jit/device_compiler_client.cc
} else if (default_device_ordinal != -1) {
  build_options.set_device_ordinal(default_device_ordinal);
}
build_options.set_result_layout(result.xla_output_shape);
build_options.set_device_allocator(options.device_allocator.get());
build_options.set_alias_passthrough_params(options.alias_passthrough_params);
build_options.mutable_debug_options()->set_xla_detailed_logging(
    options.detailed_logging);
Last Modified: Thu Oct 26 20:35:26 UTC 2023 - 1.8K bytes
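In the device_compiler_client snippet, TF-JIT forwards its compile options into xla::ExecutableBuildOptions. A sketch of what the surrounding function body plausibly looks like; the leading branch is an assumption (the snippet opens mid-if at the `else if`), while the four trailing setters are confirmed by the snippet:

xla::ExecutableBuildOptions build_options;
// Assumed: prefer an explicitly requested ordinal over the process default;
// the quoted snippet begins at the `else if` of this chain.
if (options.device_ordinal != -1) {
  build_options.set_device_ordinal(options.device_ordinal);
} else if (default_device_ordinal != -1) {
  build_options.set_device_ordinal(default_device_ordinal);
}
// Confirmed by the snippet: layout, allocator, aliasing, and logging all
// carry over from the TF-side options.
build_options.set_result_layout(result.xla_output_shape);
build_options.set_device_allocator(options.device_allocator.get());
build_options.set_alias_passthrough_params(options.alias_passthrough_params);
build_options.mutable_debug_options()->set_xla_detailed_logging(
    options.detailed_logging);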
tensorflow/compiler/jit/xla_host_send_device_context.h
//
// Example usage:
//
//  Device device;
//  stream_executor::Stream stream(executor);
//  Tensor cpu_tensor(host_allocator, DT_FLOAT, TensorShape({2, 2}));
//  Tensor device_tensor(device_allocator, DT_FLOAT, TensorShape({2, 2}));
//  se::DeviceMemoryBase gpu_dst{device_tensor.data(), 4 * sizeof(float)};
//  xla::Shape shape(xla::F32, {2, 2}, {}, {})
//  tsl::AsyncValueRef<std::unique_ptr<se::Event>> done_event =
Last Modified: Fri May 17 22:46:36 UTC 2024 - 3.7K bytes
tensorflow/compiler/jit/xla_host_recv_device_context.h
// Concrete once transfer is completed.
//
// Example usage:
//
//  Device device;
//  stream_executor::Stream stream(executor);
//  Tensor device_tensor(device_allocator, DT_FLOAT, TensorShape({2, 2}));
//  se::DeviceMemoryBase gpu_dst{device_tensor.data(), 4 * sizeof(float)};
//  xla::Shape shape(xla::F32, {2, 2}, {}, {})
//  tsl::AsyncValueRef<std::unique_ptr<se::Event>> done_event =
Last Modified: Fri May 17 22:46:36 UTC 2024 - 3.9K bytes
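The example comment above cuts off at the done_event initializer; leaving that line as-is, a hypothetical continuation of the usage (the class name comes from the file path, but the constructor argument order and the CopyDeviceTensorToCPU callback shape are assumptions, not verified against the header):

//  // Hypothetical continuation: argument order and callback are assumed.
//  XlaHostRecvDeviceContext device_context(&stream, gpu_dst, shape,
//                                          done_event);
//  device_context.CopyDeviceTensorToCPU(&device_tensor, "tensor_name",
//                                       &device, &cpu_tensor,
//                                       [](Status s) { TF_CHECK_OK(s); });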