- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 18 for "PJRT" (0.06 sec)
-
tensorflow/compiler/jit/flags.h
bool enabled_for_all_; // If true, enable Device API (PjRt) for TF GPU device. This is a helper // flag so that individual tests can turn on PjRt for GPU specifically. // Once the rollout to GPU is complete, this flag can be deprecated. bool enabled_for_gpu_; private: // Devices for which using Device API (PjRt) is allowed in the XlaLaunch op. // This can only be modified programmatically.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 14.5K bytes - Viewed (0) -
tensorflow/c/experimental/next_pluggable_device/c_api.cc
#include "tensorflow/c/tf_tensor_internal.h" #include "tensorflow/compiler/jit/variable_info.h" #include "tensorflow/compiler/jit/variable_info_util.h" #include "xla/pjrt/c/pjrt_c_api.h" #include "xla/pjrt/c/pjrt_c_api_helpers.h" #include "xla/pjrt/pjrt_c_api_client.h" #include "xla/pjrt/pjrt_client.h" #include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 05:48:24 UTC 2024 - 13.9K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util_gpu_test.cc
public: PjRtExecutionUtilGpuTest() { // Set flag to use PJRT for device compilation and execution. auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api; rollout_config.enabled_for_xla_launch_ = true; rollout_config.enabled_for_compile_on_demand_ = true; rollout_config.enabled_for_gpu_ = true; // Set flag to enable using XLA devices. PJRT currently is only supported // for XLA devices.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 10K bytes - Viewed (0) -
tensorflow/compiler/jit/flags.cc
"If true, uses Device API (PjRt) for single device compilation and " "execution of functions marked for JIT compilation i.e. " "jit_compile=True. Defaults to false."), Flag("tf_xla_use_device_api_for_compile_on_demand", &ops_flags->tf_xla_use_device_api.enabled_for_compile_on_demand_, "If true, uses Device API (PjRt) for compiling and executing ops "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 24.5K bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_device_context.cc
#include "tensorflow/compiler/jit/pjrt_tensor_buffer.h" #include "tensorflow/compiler/jit/pjrt_tensor_buffer_util.h" #include "tensorflow/compiler/tf2xla/literal_util.h" #include "xla/pjrt/pjrt_client.h" #include "xla/tsl/c/tsl_status_internal.h" #include "tensorflow/core/common_runtime/dma_helper.h" #include "tensorflow/core/common_runtime/next_pluggable_device/next_pluggable_device_api.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 08:49:31 UTC 2024 - 11.6K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compile_on_demand_op.cc
#include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "xla/client/local_client.h" #include "xla/executable_run_options.h" #include "xla/hlo/ir/hlo_input_output_alias_config.h" #include "xla/pjrt/pjrt_client.h" #include "xla/pjrt/tf_pjrt_client.h" #include "xla/service/executable.h" #include "xla/service/gpu/gpu_executable_run_options.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/op_kernel.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 29 08:39:39 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/jit/device_executable_persistor_test.cc
#include "tensorflow/compiler/jit/xla_device_compiler_client.h" #include "xla/client/client_library.h" #include "xla/client/executable_build_options.h" #include "xla/client/local_client.h" #include "xla/pjrt/pjrt_client.h" #include "xla/pjrt/tfrt_cpu_pjrt_client.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status_matchers.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 25.9K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.cc
#include "tensorflow/compiler/tf2xla/shape_util.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/local_client.h" #include "xla/pjrt/pjrt_client.h" #include "xla/pjrt/pjrt_future.h" #include "xla/pjrt/pjrt_stream_executor_client.h" #include "xla/pjrt/tracked_device_buffer.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/stream_executor/platform_manager.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.cc
VLOG(2) << "Compiling using PJRT"; Status status = CompileToPjRtLoadedExecutable( *ctx, platform_info_, function_, xla_compiler_args, DeviceCompileMode::kStrict, has_ref_vars_, /*may_alias_resource_update=*/true, &compilation_result, &pjrt_client, &pjrt_executable); OP_REQUIRES_OK_ASYNC(ctx, status, done); VLOG(2) << "Compiled using PJRT: " << status;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util_test.cc
#include "tensorflow/compiler/jit/variable_info.h" #include "tensorflow/compiler/jit/variable_info_util.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/pjrt/pjrt_client.h" #include "xla/pjrt/tfrt_cpu_pjrt_client.h" #include "xla/tests/literal_test_util.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/device.h" #include "tensorflow/core/framework/fake_input.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 28.8K bytes - Viewed (0)