Results 1 - 10 of 10 for DeviceCompiler
tensorflow/compiler/jit/xla_platform_info.h
absl::string_view visible_device_list);

// Returns the device type for building a DeviceCompiler from the given
// platform type.
absl::StatusOr<DeviceType> GetCompilationDeviceType(
    const DeviceType& platform_device_type);

// Builds a DeviceCompiler that uses xla::LocalClient using `platform_info` and
// `compilation_device_type` (in non-TPU case) and sets *xla_device_compiler to
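Not part of the indexed header, but a minimal call-site sketch may help show the intent: translating a platform device type into the compilation device type before building the DeviceCompiler. The wrapper name `BuildCompilerForPlatform` and its body are hypothetical; only `GetCompilationDeviceType` comes from the header above, and a full TensorFlow build environment is assumed.

#include "tensorflow/compiler/jit/xla_platform_info.h"
#include "tsl/platform/statusor.h"

namespace tensorflow {

// Hypothetical wrapper, for illustration only.
absl::Status BuildCompilerForPlatform(const DeviceType& platform_device_type) {
  // Translate the platform device type (e.g. GPU) into the device type the
  // XLA compiler is keyed on, as documented in xla_platform_info.h.
  TF_ASSIGN_OR_RETURN(DeviceType compilation_device_type,
                      GetCompilationDeviceType(platform_device_type));
  // `compilation_device_type` would next be handed to the DeviceCompiler
  // builder described in the header's following comment.
  (void)compilation_device_type;
  return absl::OkStatus();
}

}  // namespace tensorflow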
tensorflow/compiler/jit/xla_compiler_options_util.h
#include "xla/client/local_client.h" #include "xla/pjrt/pjrt_client.h" namespace tensorflow { // Returns created options for the XLA compiler. XlaCompiler::Options GenerateCompilerOptions( const DeviceCompiler<xla::LocalExecutable, xla::LocalClient>& xla_device_compiler, const FunctionLibraryRuntime& function_library, DeviceBase* device, se::Stream* stream, const XlaPlatformInfo& platform_info,
tensorflow/compiler/jit/xla_compile_on_demand_op.h
void Compute(OpKernelContext* ctx) override;

 private:
  Status Compile(const std::vector<XlaCompiler::Argument>& args,
                 OpKernelContext* ctx,
                 DeviceCompiler<xla::LocalExecutable, xla::LocalClient>**
                     xla_device_compiler,
                 DeviceCompilationProfiler** profiler,
                 const XlaCompiler::CompilationResult** result,
tensorflow/compiler/jit/xla_compiler_options_util.cc
#include "tensorflow/core/framework/function.h" #include "tsl/framework/device_id_utils.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; inline void LogOptions(const XlaCompiler::Options& options) { VLOG(2) << "XlaCompiler::Options[device_type=" << options.device_type
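An aside not drawn from the file: the two aliases pin DeviceCompiler's template parameters to the two supported backends (the classic xla::LocalClient path and the PjRt path), which lets the rest of the file name them uniformly. A hypothetical illustration of writing backend-agnostic code against either alias; `CompileWithLogging` is invented for this sketch, and only `LogOptions` and the aliases come from the file above.

// Hypothetical helper; works for XlaDeviceCompiler and PjRtDeviceCompiler alike.
template <typename Compiler>
void CompileWithLogging(const Compiler& device_compiler,
                        const XlaCompiler::Options& options) {
  LogOptions(options);  // VLOG(2)s the options, as defined above.
  // `device_compiler` is either backend; the template keeps this code
  // identical for both instantiations.
  (void)device_compiler;
}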
tensorflow/compiler/jit/xla_compile_util.h
// `device_type` in either the XlaLaunch op or the XlaCompileOnDemand op.
bool UsePjRtForSingleDeviceCompilation(const DeviceType& device_type);

// Gets the resource name of the PjRt DeviceCompiler for `device_type`.
std::string GetPjRtDeviceCompilerResourceName(const DeviceType& device_type);

// Gets the resource name of the DeviceCompilationProfiler for `device_type`
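A small branching sketch, not taken from the file, showing how these two helpers might be combined at a call site; `ChoosePath` is a hypothetical name, TF-style VLOG logging is assumed, and only the two declared functions are real.

#include <string>

// Hypothetical dispatcher built from the helpers declared above.
void ChoosePath(const DeviceType& device_type) {
  if (UsePjRtForSingleDeviceCompilation(device_type)) {
    // PjRt path: the DeviceCompiler is looked up under this resource name.
    const std::string resource_name =
        GetPjRtDeviceCompilerResourceName(device_type);
    VLOG(1) << "PjRt DeviceCompiler resource: " << resource_name;
  } else {
    VLOG(1) << "Falling back to the xla::LocalClient-based DeviceCompiler.";
  }
}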
tensorflow/compiler/jit/pjrt_compile_util.h
// Instead, it takes the following arguments that are obtained from
// OpKernelContext in the above function.
// - `device`: the device used to compile the function.
// - `rm`: the resource manager for DeviceCompiler to store JIT-compiled XLA
//   computation.
// - `flr`: the FunctionLibraryRuntime for the `function`.
Status CompileToPjRtLoadedExecutable(
    const DeviceBase* device, const XlaPlatformInfo& platform_info,
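The comment enumerates the context-free arguments, which suggests a sketch (hypothetical, not from the file) of a wrapper that gathers them; the truncated tail of the real parameter list is deliberately left alone rather than guessed.

// Hypothetical wrapper illustrating where the documented arguments come from
// when no OpKernelContext is available. Only the roles of `device`, `rm`, and
// `flr` are documented above; the body is a stub.
Status CompileWithoutContext(const DeviceBase* device,
                             const XlaPlatformInfo& platform_info,
                             const NameAttrList& function,
                             FunctionLibraryRuntime* flr, ResourceMgr* rm) {
  // `rm` is where the DeviceCompiler stores the JIT-compiled XLA computation;
  // `flr` resolves `function` to a body that can be compiled. A real caller
  // would forward all of these (plus the remaining, truncated arguments) to
  // CompileToPjRtLoadedExecutable.
  return absl::OkStatus();
}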
tensorflow/compiler/jit/xla_compile_util.cc
absl::StatusOr<ResourceMgr*> GetResourceMgrForDeviceCompiler( const OpKernelContext& ctx, const DeviceType& device_type) { // We store information about the JIT-compiled XLA computation in the // ResourceMgr. The DeviceCompiler (which contains the DeviceCompilationCache) // is stored in the tfrt_global ResourceMgr for TPU and the Device ResourceMgr // for CPU/GPU. This is to make sure the DeviceCompiler's lifecycle is
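A call-site sketch (not from the file) of the placement policy the comment describes: TPU compilers live in the tfrt_global ResourceMgr, CPU/GPU compilers in the device's own ResourceMgr, so the DeviceCompiler's lifetime tracks the right owner. `LookupCompilerResourceMgr` is a hypothetical wrapper; only `GetResourceMgrForDeviceCompiler` is real.

#include "tsl/platform/statusor.h"

// Hypothetical wrapper, for illustration only.
absl::Status LookupCompilerResourceMgr(const OpKernelContext& ctx,
                                       const DeviceType& device_type) {
  TF_ASSIGN_OR_RETURN(ResourceMgr* rm,
                      GetResourceMgrForDeviceCompiler(ctx, device_type));
  // `rm` now points at the ResourceMgr that owns (or will own) the
  // DeviceCompiler and its DeviceCompilationCache for this device type.
  (void)rm;
  return absl::OkStatus();
}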
tensorflow/compiler/jit/device_compiler_disable_test.cc
      XlaDeviceExecutablePersistor::Config(), device_type);
  auto compiler_client = std::make_unique<XlaDeviceCompilerClient>(client);
  auto xla_device_compiler =
      new DeviceCompiler<xla::LocalExecutable, xla::LocalClient>(
          std::move(persistor), std::move(compiler_client));
  core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
  auto profiler = new DeviceCompilationProfiler();
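One detail worth annotating (the note below is not from the indexed test): DeviceCompiler is reference-counted, which is why the test pairs the raw `new` with a `core::ScopedUnref` guard instead of a smart pointer. A minimal, self-contained illustration of the same pattern, with a hypothetical stand-in type:

#include "tensorflow/core/lib/core/refcount.h"

namespace {

// Hypothetical refcounted type standing in for DeviceCompiler.
class MyResource : public tensorflow::core::RefCounted {};

void UsePattern() {
  auto* resource = new MyResource();              // refcount starts at 1
  tensorflow::core::ScopedUnref guard(resource);  // Unref()s at scope exit
  // ... use `resource`; any longer-lived holder must Ref() it first ...
}

}  // namespace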
tensorflow/compiler/jit/pjrt_compile_util.cc
#include "tensorflow/core/lib/core/refcount.h" #include "tensorflow/core/platform/status.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace tensorflow { using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; Status CompileToPjRtLoadedExecutable( const DeviceBase* device, const XlaPlatformInfo& platform_info, const NameAttrList& function,
tensorflow/compiler/jit/kernels/xla_ops.h
// which will be compiled and executed using XLA. The XlaLocalLaunchOp is
// responsible for handling interactions with the TensorFlow executor.
// Once all inputs are present, and their shapes are known, the op can
// use a 'DeviceCompiler' to compile and execute code which is specific
// to the shapes of input Tensors.
// XlaLocalLaunchOp uses xla::LocalClient::Compile() and
// xla::LocalExecutable::Run(), and passes arguments into/out of XLA in device
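A hedged flow sketch of the Compile()/Run() pairing the comment describes; `CompileAndRun` and its plumbing are hypothetical, the single-executable indexing is an assumption for the sketch, and xla/client/local_client.h holds the authoritative signatures.

#include "xla/client/local_client.h"
#include "tsl/platform/statusor.h"

// Hypothetical helper: compile once for a given set of input shapes, then
// run the executable on device buffers.
absl::StatusOr<xla::ScopedShapedBuffer> CompileAndRun(
    xla::LocalClient* client, const xla::XlaComputation& computation,
    absl::Span<const xla::Shape* const> argument_layouts,
    absl::Span<const xla::ShapedBuffer* const> arguments) {
  // In the real op, compilation happens once per input-shape signature and
  // the result is cached by the DeviceCompiler.
  TF_ASSIGN_OR_RETURN(auto executables,
                      client->Compile(computation, argument_layouts,
                                      xla::ExecutableBuildOptions()));
  // Single-device compilation is assumed to yield one executable here.
  return executables[0]->Run(arguments, xla::ExecutableRunOptions());
}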