- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 13 for device_context (2.26 sec)
-
tensorflow/compiler/jit/xla_device.cc
TF_ASSIGN_OR_RETURN(auto device_context, GetDeviceContextDefault()); device_context->Ref(); *out_context = device_context; return absl::OkStatus(); } // Warn about XLA_CPU/XLA_GPU exactly once. static void ShowXlaDeviceDeprecationWarning( absl::string_view compilation_device_name) { static absl::once_flag once; if (absl::StrContains(compilation_device_name, "CPU") ||
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/compiler/jit/device_context_test.cc
device_allocator_ = device_->GetAllocator(device_alloc_attr); tensorflow::DeviceContext* device_context; auto status = device_->TryGetDeviceContext(&device_context); TF_EXPECT_OK(status); device_context_.reset(device_context); } std::unique_ptr<Device> device_; tensorflow::core::RefCountPtr<DeviceContext> device_context_; tensorflow::Allocator* host_allocator_; tensorflow::Allocator* device_allocator_; };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 3.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_send_recv_device_context_test.cc
std::move(se_event)); XlaHostRecvDeviceContext* device_context = new XlaHostRecvDeviceContext(stream.get(), gpu_dst, shape, done_event); TF_ASSERT_OK(device_context->CopyDeviceTensorToCPUSync( &device_tensor, "", device_.get(), &dest_cpu_tensor)); tensorflow::test::ExpectClose(origin_cpu_tensor, dest_cpu_tensor); device_context->Unref(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_send_device_context.h
// done_event->Init(); // // XlaHostSendDeviceContext device_context(&stream, &gpu_dst, // shape, done_event); // device_context.CopyCPUTensorToDeviceSync( // &cpu_tensor, &device, &device_tensor); class XlaHostSendDeviceContext : public DeviceContext { public: XlaHostSendDeviceContext( se::Stream* stream, se::DeviceMemoryBase* device_memory_base,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 3.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_recv_device_context.h
// done_event->Init(); // Tensor dest_cpu_tensor; // // XlaHostRecvDeviceContext device_context(&stream, gpu_dst, // shape, done_event); // device_context.CopyDeviceTensorToCPUSync( // &device_tensor, "", &device, &dest_cpu_tensor); class XlaHostRecvDeviceContext : public DeviceContext { public: XlaHostRecvDeviceContext( se::Stream* stream, const se::DeviceMemoryBase& device_memory_base,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 3.9K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.cc
// TODO(cheshire): Avoid duplication with framework/op_kernel.h DeviceContext* device_context = nullptr; if (device != nullptr) { TF_RETURN_IF_ERROR(device->TryGetDeviceContext(&device_context)); bool using_default_context = false; auto cleanup = absl::MakeCleanup([&] { if (device_context != nullptr && !using_default_context) { device_context->Unref(); } }); if (device_context == nullptr) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
Status TryGetDeviceContext(DeviceContext** out_context) override TF_LOCKS_EXCLUDED(mu_); Status MakeTensorFromProto(const TensorProto& tensor_proto, const AllocatorAttributes alloc_attrs, Tensor* tensor) override TF_LOCKS_EXCLUDED(mu_); Status MakeTensorFromProto(DeviceContext* device_context,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.cc
tsl::MakeConstructedAsyncValueRef<std::unique_ptr<se::Event>>( std::move(event)); Rendezvous::Args args; // Rendezvous::Args owns the device context pointer. args.device_context = new XlaHostRecvDeviceContext( stream, device_memory_base, shape, done_event); Tensor host_tensor; TF_RETURN_IF_ERROR(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util_gpu_test.cc
SetDevice(device_type, std::move(device)); XlaShapeLayoutHelpers::ShapeDeterminationFns shape_fns{ UseNoPreferenceLayoutFn(), IdentityShapeRepresentationFn()}; device_context_ = core::RefCountPtr<DeviceContext>( new PjRtDeviceContext(shape_fns, /*use_pjrt_tensor_buffer=*/true)); // Get the host allocator. AllocatorAttributes host_alloc_attr; host_alloc_attr.set_on_host(true);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 10K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util_test.cc
device_type, xla::GetTfrtCpuClient(/*asynchronous=*/true, /*cpu_device_count=*/1) .value())); // device_context_ should be a PjRtDeviceContext. TF_CHECK_OK(device_->TryGetDeviceContext(&device_context_)); // Get the host allocator. AllocatorAttributes host_alloc_attr; host_alloc_attr.set_on_host(true);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 28.8K bytes - Viewed (0)