Results 1 - 10 of 12 for DeviceCompiler (0.34 sec)
tensorflow/compiler/jit/xla_platform_info.h
```cpp
absl::string_view visible_device_list);

// Returns the device type for building a DeviceCompiler from the given platform
// type.
absl::StatusOr<DeviceType> GetCompilationDeviceType(
    const DeviceType& platform_device_type);

// Builds a DeviceCompiler that uses xla::LocalClient using `platform_info` and
// `compilation_device_type` (in non-TPU case) and sets *xla_device_compiler to
```
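The header declares `GetCompilationDeviceType`, which maps a platform device type (CPU, GPU, TPU) to the device type a DeviceCompiler should be built for. A minimal call-site sketch, assuming the function lives in the `tensorflow` namespace as the surrounding files suggest; the wrapper function and the GPU choice are illustrative, not from the source:

```cpp
#include "tensorflow/compiler/jit/xla_platform_info.h"

// Hypothetical call site: derive the compilation device type for a GPU
// platform. Only GetCompilationDeviceType comes from the header above.
absl::StatusOr<tensorflow::DeviceType> CompilationTypeForGpu() {
  const tensorflow::DeviceType platform_device_type("GPU");
  absl::StatusOr<tensorflow::DeviceType> compilation_device_type =
      tensorflow::GetCompilationDeviceType(platform_device_type);
  if (!compilation_device_type.ok()) {
    return compilation_device_type.status();
  }
  // The result would then be passed, along with the XlaPlatformInfo, to the
  // DeviceCompiler builder declared just below it (name truncated above).
  return compilation_device_type;
}
```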
tensorflow/compiler/jit/device_compiler.h
```cpp
// to disk.
//
// Since XLA computations must have static shapes, DeviceCompiler generates a
// new XLA computation for each new set of input shapes.
// TODO(b/255826209): De-templatize once we've moved to Device API completely.
template <typename ExecutableType, typename ClientType>
class DeviceCompiler : public ResourceBase {
 public:
  DeviceCompiler(
      std::unique_ptr<DeviceExecutablePersistor<ExecutableType, ClientType>>
```
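The comment captures the central design point: XLA computations need static shapes, so a single DeviceCompiler caches one executable per input-shape signature, and the class stays templated on the executable/client pair until the Device API migration lands. As a concrete illustration, these are the two instantiations that recur verbatim through the remaining results, qualified here for use outside namespace `tensorflow`:

```cpp
#include "tensorflow/compiler/jit/device_compiler.h"

// The two concrete DeviceCompiler instantiations used across
// tensorflow/compiler/jit: the "classic" XLA local-client pair and the
// PjRt pair.
using XlaDeviceCompiler =
    tensorflow::DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
using PjRtDeviceCompiler =
    tensorflow::DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;
```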
tensorflow/compiler/jit/xla_compiler_options_util.cc
#include "tensorflow/core/framework/function.h" #include "tsl/framework/device_id_utils.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; inline void LogOptions(const XlaCompiler::Options& options) { VLOG(2) << "XlaCompiler::Options[device_type=" << options.device_type
tensorflow/compiler/jit/xla_compile_util.h
```cpp
// `device_type` in either the XlaLaunch op or the XlaCompileOnDemand op.
bool UsePjRtForSingleDeviceCompilation(const DeviceType& device_type);

// Gets the resource name of the PjRt DeviceCompiler for `device_type`.
std::string GetPjRtDeviceCompilerResourceName(const DeviceType& device_type);

// Gets the resource name of the DeviceCompilationProfiler for `device_type`
```
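The two declarations above pair naturally: one decides whether the PjRt path applies to a device type, the other names the ResourceMgr slot for its DeviceCompiler. A sketch combining them; the helper itself is hypothetical, and only the two called functions come from the header:

```cpp
#include "tensorflow/compiler/jit/xla_compile_util.h"

// Hypothetical helper: return the resource name of the PjRt DeviceCompiler
// when the PjRt path is enabled for `device_type`, else an empty string.
// UsePjRtForSingleDeviceCompilation and GetPjRtDeviceCompilerResourceName
// are the declarations shown above.
std::string PjRtCompilerResourceNameOrEmpty(
    const tensorflow::DeviceType& device_type) {
  if (!tensorflow::UsePjRtForSingleDeviceCompilation(device_type)) {
    return "";
  }
  return tensorflow::GetPjRtDeviceCompilerResourceName(device_type);
}
```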
tensorflow/compiler/jit/xla_platform_info.cc
#include "tensorflow/core/tpu/tpu_defs.h" #include "tsl/framework/device_type.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; using XlaDeviceExecutablePersistor = DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>;
tensorflow/compiler/jit/device_compiler_test.cc
#include "tensorflow/core/platform/status_matchers.h" #include "tensorflow/core/platform/statusor.h" namespace tensorflow { namespace { using ::testing::_; using ::testing::Return; using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using XlaDeviceExecutablePersistor = DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>; using Signature = DeviceCompilationClusterSignature;
tensorflow/compiler/jit/xla_platform_info_test.cc
#include "tensorflow/core/tfrt/common/pjrt_util.h" #include "tensorflow/core/tpu/tpu_defs.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; class XlaPlatformInfoTest : public ::testing::Test { protected: void SetUp() override {
tensorflow/compiler/jit/xla_compile_util.cc
```cpp
absl::StatusOr<ResourceMgr*> GetResourceMgrForDeviceCompiler(
    const OpKernelContext& ctx, const DeviceType& device_type) {
  // We store information about the JIT-compiled XLA computation in the
  // ResourceMgr. The DeviceCompiler (which contains the DeviceCompilationCache)
  // is stored in the tfrt_global ResourceMgr for TPU and the Device ResourceMgr
  // for CPU/GPU. This is to make sure the DeviceCompiler's lifecycle is
```
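The comment states the storage rule: the DeviceCompiler (and the DeviceCompilationCache inside it) lives in the tfrt_global ResourceMgr on TPU but in the device's own ResourceMgr on CPU/GPU, so its lifetime is tied to the right owner. A caller sketch, assuming the function is also declared in the matching xla_compile_util.h; the wrapper is illustrative:

```cpp
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/core/framework/op_kernel.h"

// Hypothetical kernel fragment: fetch the ResourceMgr that owns the
// DeviceCompiler for this device type (tfrt_global on TPU, the device
// ResourceMgr on CPU/GPU, per the comment above).
absl::Status FindCompilerResourceMgr(tensorflow::OpKernelContext* ctx,
                                     const tensorflow::DeviceType& device_type) {
  absl::StatusOr<tensorflow::ResourceMgr*> rm =
      tensorflow::GetResourceMgrForDeviceCompiler(*ctx, device_type);
  if (!rm.ok()) return rm.status();
  // The caller would then look up or create the DeviceCompiler resource in
  // the returned ResourceMgr before compiling.
  return absl::OkStatus();
}
```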
tensorflow/compiler/jit/xla_compile_on_demand_op.cc
#include "tensorflow/core/tfrt/common/pjrt_util.h" #include "tsl/platform/errors.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; XlaCompiler::CompileOptions GetCompileOptions(bool for_pjrt = false) { XlaCompiler::CompileOptions compile_options;
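`GetCompileOptions` is a file-local factory producing `XlaCompiler::CompileOptions` for both execution paths, with a single flag selecting PjRt-specific settings; its body is truncated in the snippet, so the options it actually sets are not visible. The shape of the pattern only, with no fields filled in:

```cpp
// Sketch of the file-local factory pattern above. The real settings are
// truncated out of the snippet, so none are reproduced here.
XlaCompiler::CompileOptions GetCompileOptions(bool for_pjrt = false) {
  XlaCompiler::CompileOptions compile_options;
  if (for_pjrt) {
    // PjRt-specific adjustments would be applied here.
  }
  return compile_options;
}
```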
tensorflow/compiler/jit/get_compiler_ir.cc
```cpp
    absl::Span<const ArgShapeAndDType> input_arg_shape_and_dtype,
    absl::Span<const TensorHandle* const> input_handles,
    CompilerArgSource compiler_arg_source) {
  using XlaDeviceCompiler =
      DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
  se::Stream* stream = nullptr;
  if (const DeviceBase::AcceleratorDeviceInfo* accelerator_device_info =
          dev->tensorflow_accelerator_device_info()) {
```
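The snippet cuts off inside the `if`: only accelerator devices (GPU/TPU) carry `AcceleratorDeviceInfo`, so on CPU the stream stays null. The likely continuation, shown in isolation; the assignment below is an assumption based on the usual TensorFlow device API rather than visible in the snippet:

```cpp
// Isolated form of the pattern above; `dev` is assumed to be a
// tensorflow::DeviceBase*. On CPU there is no accelerator info and the
// stream remains null.
se::Stream* stream = nullptr;
if (const tensorflow::DeviceBase::AcceleratorDeviceInfo*
        accelerator_device_info = dev->tensorflow_accelerator_device_info()) {
  // Assumption: AcceleratorDeviceInfo exposes the compute stream as `stream`.
  stream = accelerator_device_info->stream;
}
```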