- Sort: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 42 for PJRT (0.07 sec)
-
tensorflow/compiler/jit/xla_compiler_options_util.h
#include "tensorflow/compiler/jit/device_compiler.h" #include "tensorflow/compiler/jit/xla_platform_info.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/local_client.h" #include "xla/pjrt/pjrt_client.h" namespace tensorflow { // Returns created options for the XLA compiler. XlaCompiler::Options GenerateCompilerOptions( const DeviceCompiler<xla::LocalExecutable, xla::LocalClient>&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Dec 29 01:41:20 UTC 2023 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_compile_util.h
// Compiles a `function` to PjRtLoadedExecutable `executable` with `ctx`. // The compilation result is output in `compilation_result`. The PJRT client // used for compilation is output in `client`. The PJRT executable is output in // `executable`. Status CompileToPjRtLoadedExecutable( const OpKernelContext& ctx, const XlaPlatformInfo& platform_info, const NameAttrList& function, const std::vector<XlaCompiler::Argument>& args,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util_gpu_test.cc
public: PjRtExecutionUtilGpuTest() { // Set flag to use PJRT for device compilation and execution. auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api; rollout_config.enabled_for_xla_launch_ = true; rollout_config.enabled_for_compile_on_demand_ = true; rollout_config.enabled_for_gpu_ = true; // Set flag to enable using XLA devices. PJRT currently is only supported // for XLA devices.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 10K bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_compile_util.cc
#include "tensorflow/compiler/jit/xla_compile_util.h" #include "tensorflow/compiler/jit/xla_compiler_options_util.h" #include "tensorflow/compiler/jit/xla_platform_info.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/pjrt/pjrt_client.h" #include "tensorflow/core/framework/device_base.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/resource_mgr.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 3.8K bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_base_device.cc
options.shape_determination_fns) { if (options.shape_determination_fns.empty()) { LOG(ERROR) << "shape_representation_fns must be non-empty."; } VLOG(1) << "Created PJRT base device " << options.compilation_device_name << " device_name: " << name(); } /*static*/ absl::StatusOr<const PjRtBaseDevice::Metadata*> PjRtBaseDevice::GetMetadataFromDevice(DeviceBase* device) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 2.5K bytes - Viewed (0) -
tensorflow/compiler/jit/flags.cc
"If true, uses Device API (PjRt) for single device compilation and " "execution of functions marked for JIT compilation i.e. " "jit_compile=True. Defaults to false."), Flag("tf_xla_use_device_api_for_compile_on_demand", &ops_flags->tf_xla_use_device_api.enabled_for_compile_on_demand_, "If true, uses Device API (PjRt) for compiling and executing ops "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 24.5K bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_device_context.cc
#include "tensorflow/compiler/jit/pjrt_tensor_buffer.h" #include "tensorflow/compiler/jit/pjrt_tensor_buffer_util.h" #include "tensorflow/compiler/tf2xla/literal_util.h" #include "xla/pjrt/pjrt_client.h" #include "xla/tsl/c/tsl_status_internal.h" #include "tensorflow/core/common_runtime/dma_helper.h" #include "tensorflow/core/common_runtime/next_pluggable_device/next_pluggable_device_api.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 08:49:31 UTC 2024 - 11.6K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compile_on_demand_op.cc
#include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "xla/client/local_client.h" #include "xla/executable_run_options.h" #include "xla/hlo/ir/hlo_input_output_alias_config.h" #include "xla/pjrt/pjrt_client.h" #include "xla/pjrt/tf_pjrt_client.h" #include "xla/service/executable.h" #include "xla/service/gpu/gpu_executable_run_options.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/op_kernel.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 29 08:39:39 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/jit/device_executable_persistor_test.cc
#include "tensorflow/compiler/jit/xla_device_compiler_client.h" #include "xla/client/client_library.h" #include "xla/client/executable_build_options.h" #include "xla/client/local_client.h" #include "xla/pjrt/pjrt_client.h" #include "xla/pjrt/tfrt_cpu_pjrt_client.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status_matchers.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 25.9K bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_device_context.h
#include "tensorflow/core/framework/device_base.h" #include "tensorflow/core/platform/status.h" namespace tensorflow { // Helper class for managing data transfers between host and accelerator // devices using PjRt. class PjRtDeviceContext : public DeviceContext { public: explicit PjRtDeviceContext( XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns, bool use_pjrt_tensor_buffer = false)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jul 19 19:27:39 UTC 2023 - 2.7K bytes - Viewed (0)