- Sort Score
- Result 10 results
- Languages All
Results 1 - 5 of 5 for AcceleratorDeviceInfo (0.54 sec)
-
tensorflow/compiler/jit/xla_device.cc
std::make_unique<DeviceBase::AcceleratorDeviceInfo>(); accelerator_device_info->default_context = device_contexts_.at(0); set_tensorflow_accelerator_device_info(accelerator_device_info.get()); accelerator_device_info_ = std::move(accelerator_device_info); VLOG(1) << "XlaDevice " << this << " new AcceleratorDeviceInfo " << accelerator_device_info_.get(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_cpu_device.cc
UseNoPreferenceLayoutFn(), IdentityShapeRepresentationFn()}; options.shape_determination_fns = {shape_representation_fns}; auto device = std::make_unique<XlaDevice>(session_options, options); // Setting AcceleratorDeviceInfo because eager runtime relies on the device // context in tensorflow_accelerator_device_info(). Also, // tensorflow_accelerator_device_info() == nullptr is used as an IsCPU test.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
absl::StatusOr<DeviceContext*> GetDeviceContextDefault(); // Get the device context given the index. absl::StatusOr<DeviceContext*> GetDeviceContextWithIndex(int index); // Instructs this XlaDevice to set an AcceleratorDeviceInfo, which holds extra // information for GPU and TPU devices. Status UseAcceleratorDeviceInfo() TF_LOCKS_EXCLUDED(mu_); // Instructs this XlaDevice to return 'sync_on_completion' for
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_tpu_device.cc
options.shape_determination_fns = {shape_determination_fns}; options.padded_shape_fn = &TpuPaddedShapeFn; auto device = std::make_unique<XlaDevice>(session_options, options); // The AcceleratorDeviceInfo actually provides information not only for GPU // devices but also for TPU. The name is a legacy from the pre-TPU // dark ages. Status status = device->UseAcceleratorDeviceInfo(); if (!status.ok()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 22:53:47 UTC 2024 - 20.9K bytes - Viewed (0) -
tensorflow/compiler/jit/get_compiler_ir.cc
CompilerArgSource compiler_arg_source) { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; se::Stream* stream = nullptr; if (const DeviceBase::AcceleratorDeviceInfo* accelerator_device_info = dev->tensorflow_accelerator_device_info()) { stream = accelerator_device_info->stream; } TF_RETURN_IF_ERROR(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 19K bytes - Viewed (0)