- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 7 of 7 for device_allocator_ (0.58 sec)
-
tensorflow/compiler/jit/xla_platform_info.h
// then device_allocator_ is the xla::Backend's memory allocator. If the op // is placed on a regular CPU or GPU device then device_allocator_ is null. // The allocator is of unknown provenance; keep it in a shared pointer to // set an artificial refcount of one. std::shared_ptr<se::DeviceMemoryAllocator> device_allocator_; XlaPlatformInfo(const XlaPlatformInfo&) = delete;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/compiler/jit/device_context_test.cc
host_alloc_attr.set_on_host(true); host_allocator_ = device_->GetAllocator(host_alloc_attr); tensorflow::AllocatorAttributes device_alloc_attr; device_alloc_attr.set_on_host(false); device_allocator_ = device_->GetAllocator(device_alloc_attr); tensorflow::DeviceContext* device_context; auto status = device_->TryGetDeviceContext(&device_context); TF_EXPECT_OK(status);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 3.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_send_recv_device_context_test.cc
AllocatorAttributes device_alloc_attr; device_alloc_attr.set_on_host(false); device_allocator_ = device_->GetAllocator(device_alloc_attr); } protected: std::unique_ptr<Device> device_; Allocator* host_allocator_; Allocator* device_allocator_; }; TEST_F(XlaHostSendRecvDeviceContextTest, CopyDeviceTensorToCPU) { SetDevice("GPU");
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/compiler/jit/device_compiler_client.cc
} else if (default_device_ordinal != -1) { build_options.set_device_ordinal(default_device_ordinal); } build_options.set_result_layout(result.xla_output_shape); build_options.set_device_allocator(options.device_allocator.get()); build_options.set_alias_passthrough_params(options.alias_passthrough_params); build_options.mutable_debug_options()->set_xla_detailed_logging( options.detailed_logging);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Oct 26 20:35:26 UTC 2023 - 1.8K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_send_device_context.h
// // Example usage: // // Device device; // stream_executor::Stream stream(executor); // Tensor cpu_tensor(host_allocator, DT_FLOAT, TensorShape({2, 2})); // Tensor device_tensor(device_allocator, DT_FLOAT, TensorShape({2, 2})); // se::DeviceMemoryBase gpu_dst{device_tensor.data(), 4 * sizeof(float)}; // xla::Shape shape(xla::F32, {2, 2}, {}, {}) // tsl::AsyncValueRef<std::unique_ptr<se::Event>> done_event =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 3.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_recv_device_context.h
// Concrete once transfer is completed. // // Example usage: // // Device device; // stream_executor::Stream stream(executor); // Tensor device_tensor(device_allocator, DT_FLOAT, TensorShape({2, 2})); // se::DeviceMemoryBase gpu_dst{device_tensor.data(), 4 * sizeof(float)}; // xla::Shape shape(xla::F32, {2, 2}, {}, {}) // tsl::AsyncValueRef<std::unique_ptr<se::Event>> done_event =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 3.9K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compiler_options_util.cc
options.graph_def_version = function_library.graph_def_version(); options.allow_cpu_custom_calls = (platform_info.platform_id() == se::host::kHostPlatformId); options.device_allocator = GetAllocator(device, stream, platform_info); if (platform_info.xla_device_metadata()) { options.shape_determination_fns = platform_info.xla_device_metadata()->default_shape_determination_fns(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 6.4K bytes - Viewed (0)