Results 11 - 20 of 20 for DeviceCompiler (0.18 sec)
tensorflow/compiler/jit/xla_compiler_options_util_test.cc
#include "tensorflow/core/tpu/tpu_defs.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using XlaDeviceExecutablePersistor = DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; using PjRtDeviceExecutablePersistor =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Dec 29 01:41:20 UTC 2023 - 14.8K bytes
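
The aliases make the design visible: DeviceCompiler and DeviceExecutablePersistor are templates over an (executable, client) pair, one pair per backend. For reference, the full set of aliases, with the truncated PjRt persistor alias completed by analogy with its XLA counterpart (an inference, also confirmed by the xla_launch_util_test.cc result below):

// The two backends instantiate the same two templates. The alias names
// mirror the snippet; the final persistor alias is completed here by
// analogy with the XLA pair, since the snippet truncates it.
using XlaDeviceCompiler =
    DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
using XlaDeviceExecutablePersistor =
    DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>;
using PjRtDeviceCompiler =
    DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;
using PjRtDeviceExecutablePersistor =
    DeviceExecutablePersistor<xla::PjRtLoadedExecutable, xla::PjRtClient>;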
tensorflow/compiler/jit/pjrt_compile_util.h
// Instead, it takes the following arguments that are obtained from
// OpKernelContext in the above function.
// - `device`: the device used to compile the function.
// - `rm`: the resource manager for DeviceCompiler to store JIT-compiled XLA
//   computation.
// - `flr`: the FunctionLibraryRuntime for the `function`.
Status CompileToPjRtLoadedExecutable(
    const DeviceBase* device, const XlaPlatformInfo& platform_info,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 2.7K bytes
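
The header comment says this overload replaces an OpKernelContext with the three things the sibling overload extracts from it. A minimal sketch of that extraction (the wrapper function and its forwarding are hypothetical; the three accessor calls are standard OpKernelContext API):

// Sketch: how `device`, `rm`, and `flr` come out of an OpKernelContext in
// the context-taking overload. Only the accessors are established API; the
// wrapper is hypothetical, and the remaining arguments of
// CompileToPjRtLoadedExecutable stay elided because the snippet truncates
// the signature.
void ForwardToContextFreeOverload(OpKernelContext* ctx) {
  DeviceBase* device = ctx->device();                     // `device`
  ResourceMgr* rm = ctx->resource_manager();              // `rm`
  FunctionLibraryRuntime* flr = ctx->function_library();  // `flr`
  // ... then call CompileToPjRtLoadedExecutable(device, platform_info,
  //     function, ..., rm, flr, ...) with the rest of the arguments.
  (void)device; (void)rm; (void)flr;
}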
tensorflow/compiler/jit/xla_compile_util.cc
absl::StatusOr<ResourceMgr*> GetResourceMgrForDeviceCompiler(
    const OpKernelContext& ctx, const DeviceType& device_type) {
  // We store information about the JIT-compiled XLA computation in the
  // ResourceMgr. The DeviceCompiler (which contains the DeviceCompilationCache)
  // is stored in the tfrt_global ResourceMgr for TPU and the Device ResourceMgr
  // for CPU/GPU. This is to make sure the DeviceCompiler's lifecycle is
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 4.6K bytes
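
The comment spells out the storage policy: the DeviceCompiler lives in a ResourceMgr whose lifetime matches the device (tfrt_global for TPU, the device's own ResourceMgr for CPU/GPU). A hedged sketch of the usual lookup-or-create pattern against whichever ResourceMgr that function returns; the container/resource names and the factory are placeholders, not taken from the file:

// Sketch: lookup-or-create the per-device DeviceCompiler in the chosen
// ResourceMgr. "jit" / "device_compiler" are placeholder names, and
// BuildDeviceCompiler() is a hypothetical factory.
absl::StatusOr<XlaDeviceCompiler*> GetOrCreateDeviceCompiler(ResourceMgr* rm) {
  XlaDeviceCompiler* compiler = nullptr;
  TF_RETURN_IF_ERROR(rm->LookupOrCreate<XlaDeviceCompiler>(
      "jit", "device_compiler", &compiler,
      [](XlaDeviceCompiler** c) {
        *c = BuildDeviceCompiler();  // hypothetical factory
        return absl::OkStatus();
      }));
  // The caller now owns one reference; pair with core::ScopedUnref.
  return compiler;
}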
tensorflow/compiler/jit/device_compiler_disable_test.cc
      XlaDeviceExecutablePersistor::Config(), device_type);
  auto compiler_client = std::make_unique<XlaDeviceCompilerClient>(client);
  auto xla_device_compiler =
      new DeviceCompiler<xla::LocalExecutable, xla::LocalClient>(
          std::move(persistor), std::move(compiler_client));
  core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
  auto profiler = new DeviceCompilationProfiler();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 3.6K bytes
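
For context, a sketch of the construction pattern this test excerpt comes from; the make_unique call for the persistor is inferred from the truncated first line of the snippet, so treat it as an assumption:

// Sketch, partly inferred: build a persistor and a compiler client, then
// hand both to a refcounted DeviceCompiler.
auto persistor = std::make_unique<XlaDeviceExecutablePersistor>(
    XlaDeviceExecutablePersistor::Config(), device_type);  // inferred line
auto compiler_client = std::make_unique<XlaDeviceCompilerClient>(client);
auto* xla_device_compiler =
    new DeviceCompiler<xla::LocalExecutable, xla::LocalClient>(
        std::move(persistor), std::move(compiler_client));
// DeviceCompiler is refcounted; ScopedUnref releases this reference when
// the enclosing scope ends.
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);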
tensorflow/compiler/jit/pjrt_compile_util.cc
#include "tensorflow/core/lib/core/refcount.h" #include "tensorflow/core/platform/status.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace tensorflow { using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; Status CompileToPjRtLoadedExecutable( const DeviceBase* device, const XlaPlatformInfo& platform_info, const NameAttrList& function,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 3.8K bytes
tensorflow/compiler/jit/xla_compile_on_demand_op.cc
#include "tensorflow/core/tfrt/common/pjrt_util.h" #include "tsl/platform/errors.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; XlaCompiler::CompileOptions GetCompileOptions(bool for_pjrt = false) { XlaCompiler::CompileOptions compile_options;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 29 08:39:39 UTC 2024 - 13.4K bytes
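
The helper builds one XlaCompiler::CompileOptions for both code paths, toggled by `for_pjrt`. A hedged sketch of what such a helper might set; only the signature comes from the snippet, and the specific fields and values below are assumptions for illustration:

// Sketch: a plausible body for GetCompileOptions. The field choices are
// assumptions; is_entry_computation and always_return_tuple are real
// XlaCompiler::CompileOptions fields, but the values here are illustrative.
XlaCompiler::CompileOptions GetCompileOptions(bool for_pjrt = false) {
  XlaCompiler::CompileOptions compile_options;
  compile_options.is_entry_computation = true;  // top-level computation
  if (for_pjrt) {
    compile_options.always_return_tuple = false;  // assumed PjRt tweak
  }
  return compile_options;
}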
tensorflow/compiler/jit/kernels/xla_ops.h
// which will be compiled and executed using XLA. The XlaLocalLaunchOp is
// responsible for handling interactions with the TensorFlow executor.
// Once all inputs are present, and their shapes are known, the op can
// use a 'DeviceCompiler' to compile and execute code which is specific
// to the shapes of input Tensors.
// XlaLocalLaunchOp uses xla::LocalClient::Compile() and
// xla::LocalExecutable::Run(), and passes arguments into/out of XLA in device
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 16 23:44:26 UTC 2023 - 4.8K bytes
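
The comment names the classic flow: once input shapes are known, compile with xla::LocalClient::Compile() and execute with xla::LocalExecutable::Run(). A heavily simplified sketch of that pairing; real callers pass argument layouts, populated build/run options, and device buffers, all of which are defaulted or empty here:

// Sketch of the Compile()/Run() pairing the comment describes.
absl::StatusOr<xla::ScopedShapedBuffer> CompileAndRun(
    xla::LocalClient* client, const xla::XlaComputation& computation) {
  // Compile() can return several executables when partitioning; take the
  // first for this single-device sketch.
  TF_ASSIGN_OR_RETURN(
      auto executables,
      client->Compile(computation, /*argument_layouts=*/{},
                      xla::ExecutableBuildOptions()));
  // Execute with (here) no arguments and default run options.
  return executables[0]->Run(/*arguments=*/{}, xla::ExecutableRunOptions());
}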
tensorflow/compiler/jit/get_compiler_ir.cc
    absl::Span<const ArgShapeAndDType> input_arg_shape_and_dtype,
    absl::Span<const TensorHandle* const> input_handles,
    CompilerArgSource compiler_arg_source) {
  using XlaDeviceCompiler =
      DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
  se::Stream* stream = nullptr;
  if (const DeviceBase::AcceleratorDeviceInfo* accelerator_device_info =
          dev->tensorflow_accelerator_device_info()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 19K bytes
tensorflow/compiler/jit/xla_launch_util_test.cc
#include "tsl/lib/core/status_test_util.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace { using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; using PjRtDeviceExecutablePersistor = DeviceExecutablePersistor<xla::PjRtLoadedExecutable, xla::PjRtClient>; absl::flat_hash_map<int, const Tensor*> GetVariableSnapshots(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 28.8K bytes
tensorflow/compiler/jit/kernels/xla_ops.cc
  }      \
  } while (0)

namespace tensorflow {
namespace {

using XlaDeviceCompiler =
    DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
using PjRtDeviceCompiler =
    DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;

auto* xla_launch_counter = monitoring::Counter<1>::New(
    "/tensorflow/core/xla_launch_counter",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes
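
The snippet truncates at the counter definition. A short sketch of how a tensorflow monitoring::Counter<1> is defined and incremented; only the metric path comes from the snippet, while the description string, label name, and label value are illustrative:

// Sketch: a counter with one label dimension, then a cell increment.
// Metric path is from the snippet; description and label are assumptions.
auto* xla_launch_counter = monitoring::Counter<1>::New(
    "/tensorflow/core/xla_launch_counter",
    "Number of XLA launches, partitioned by device type.", "device");

// Somewhere in the launch path:
xla_launch_counter->GetCell(/*device=*/"GPU")->IncrementBy(1);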