- Sort Score
- Results per page 10 results
- Languages All
Results 1 - 10 of 15 for xla_device (0.16 sec)
-
tensorflow/compiler/jit/xla_device.cc
} /*static*/ Status XlaDevice::GetMetadataFromDevice( DeviceBase* device, const XlaDevice::Metadata** metadata) { *metadata = nullptr; XlaDevice* xla_device = dynamic_cast<XlaDevice*>(device->UnderlyingDevice()); if (xla_device == nullptr) { return errors::Internal( "Cannot get XLA metadata from non-XLA device \"", device->name(), "\". GetMetadata must only be called on an XLA device. Either an "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
==============================================================================*/ // The XlaDevice executes a TensorFlow graph using the XLA linear algebra // runtime. // // Operators assigned to an XlaDevice are compiled into XLA computations. // Tensors on an XlaDevice are thin wrappers around XLA ScopedShapedBuffers. // // XlaDevice is instantiated separately for each XLA backend (e.g., CPU or GPU),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/jit/BUILD
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 00:41:19 UTC 2024 - 61.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_cpu_device.cc
#include "tensorflow/compiler/jit/kernels/xla_ops.h" #include "tensorflow/compiler/jit/xla_compile_on_demand_op.h" #include "tensorflow/compiler/jit/xla_device.h" #include "tensorflow/compiler/jit/xla_device_ops.h" #include "tensorflow/compiler/tf2xla/layout_util.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/stream_executor/platform_manager.h" #include "tensorflow/core/common_runtime/device_factory.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_gpu_device.cc
#include "tensorflow/compiler/jit/defs.h" #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/compiler/jit/kernels/xla_ops.h" #include "tensorflow/compiler/jit/xla_device.h" #include "tensorflow/compiler/jit/xla_device_ops.h" #include "tensorflow/compiler/jit/xla_platform_info.h" #include "tensorflow/compiler/tf2xla/layout_util.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/stream_executor/gpu/gpu_init.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 6.6K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info.h
#include <memory> #include <optional> #include <string> #include "tensorflow/compiler/jit/device_compiler.h" #include "tensorflow/compiler/jit/pjrt_base_device.h" #include "tensorflow/compiler/jit/xla_device.h" #include "xla/stream_executor/integrations/tf_allocator_adapter.h" #include "tensorflow/core/framework/op_kernel.h" namespace tensorflow { // Holds some information about the platform on which an
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.h
#ifndef TENSORFLOW_COMPILER_JIT_KERNELS_XLA_OPS_H_ #define TENSORFLOW_COMPILER_JIT_KERNELS_XLA_OPS_H_ #include <atomic> #include "tensorflow/compiler/jit/device_compiler.h" #include "tensorflow/compiler/jit/xla_device.h" #include "tensorflow/compiler/jit/xla_launch_util.h" #include "tensorflow/compiler/jit/xla_platform_info.h" #include "xla/stream_executor/integrations/tf_allocator_adapter.h" #include "tensorflow/core/framework/allocator.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 16 23:44:26 UTC 2023 - 4.8K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_tpu_device.cc
#include "tensorflow/compiler/jit/xla_tpu_device.h" #include "absl/types/optional.h" #include "tensorflow/compiler/jit/kernels/xla_ops.h" #include "tensorflow/compiler/jit/xla_device.h" #include "tensorflow/compiler/jit/xla_device_context.h" #include "tensorflow/compiler/jit/xla_device_ops.h" #include "tensorflow/compiler/tf2xla/layout_util.h" #include "tensorflow/compiler/tf2xla/shape_util.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 22:53:47 UTC 2024 - 20.9K bytes - Viewed (0) -
tensorflow/compiler/jit/flags.h
// If non-zero, limits the size of any table shard to be below these // many bytes. int64_t tf_xla_sparse_core_stacking_table_shard_limit_bytes; }; // Flags associated with the XLA bridge's xla_device module. struct XlaDeviceFlags { // Switch the CPU device into "on-demand" mode, where instead of // auto-clustering ops are compiled one by one just-in-time.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 14.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info_test.cc
TEST_F(XlaPlatformInfoTest, BuildXlaDeviceCompilerXlaDeviceMetadata) { device_setup_.AddDevicesAndSetUp({DEVICE_XLA_GPU}); Device* device = device_setup_.GetDevice(DEVICE_XLA_GPU); const XlaDevice::Metadata* metadata = nullptr; TF_CHECK_OK(XlaDevice::GetMetadataFromDevice(device, &metadata)); XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device); TF_ASSERT_OK_AND_ASSIGN( DeviceType compilation_device_type,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Jan 14 15:17:12 UTC 2024 - 13.6K bytes - Viewed (0)