- Sort: Score
- Results: 10
- Languages: All
Results 1 - 10 of 10 for GetXlaDeviceFlags (0.54 sec)
-
tensorflow/compiler/jit/xla_gpu_device.cc
std::vector<std::unique_ptr<Device>>* devices) override; }; Status XlaGpuDeviceFactory::ListPhysicalDevices(std::vector<string>* devices) { XlaDeviceFlags* flags = GetXlaDeviceFlags(); if (!flags->tf_xla_enable_xla_devices && !XlaDevicesCreationRequired()) { VLOG(1) << "Not creating XLA devices, tf_xla_enable_xla_devices not set " "and XLA devices creation not required";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 6.6K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_cpu_device.cc
std::vector<std::unique_ptr<Device>>* devices) override; }; Status XlaCpuDeviceFactory::ListPhysicalDevices(std::vector<string>* devices) { XlaDeviceFlags* flags = GetXlaDeviceFlags(); if (!flags->tf_xla_enable_xla_devices && !XlaDevicesCreationRequired()) { VLOG(1) << "Not creating XLA devices, tf_xla_enable_xla_devices not set " "and XLA device creation not requested";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/jit/device_context_test.cc
static bool Initialized = [] { auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api; rollout_config.enabled_for_xla_launch_ = true; rollout_config.enabled_for_compile_on_demand_ = true; tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true; return true; }(); class DeviceContextTest : public ::testing::Test { public: void SetDevice(const string& device_type) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 3.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info_test.cc
using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; class XlaPlatformInfoTest : public ::testing::Test { protected: void SetUp() override { tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true; tensorflow::GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_directory = ""; tensorflow::GetMarkForCompilationPassFlags()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Jan 14 15:17:12 UTC 2024 - 13.6K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util_gpu_test.cc
rollout_config.enabled_for_compile_on_demand_ = true; rollout_config.enabled_for_gpu_ = true; // Set flag to enable using XLA devices. PJRT currently is only supported // for XLA devices. GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true; // Add and setup the GPU device. auto device_type = DeviceType(DEVICE_GPU); auto jit_device_type = DeviceType(DEVICE_GPU);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 10K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compiler_options_util_test.cc
GetShapeDeterminationFns()); } class XlaCompilerOptionsTest : public ::testing::Test { protected: void SetUp() override { tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true; } DeviceSetup device_setup_; }; TEST_F(XlaCompilerOptionsTest, PjRtOptionsXlaDevice) { device_setup_.AddDevicesAndSetUp({DEVICE_XLA_GPU});
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Dec 29 01:41:20 UTC 2023 - 14.8K bytes - Viewed (0) -
tensorflow/compiler/jit/flags.h
// always return the same pointer. MarkForCompilationPassFlags* GetMarkForCompilationPassFlags(); BuildXlaOpsPassFlags* GetBuildXlaOpsPassFlags(); XlaSparseCoreFlags* GetXlaSparseCoreFlags(); XlaDeviceFlags* GetXlaDeviceFlags(); XlaOpsCommonFlags* GetXlaOpsCommonFlags(); XlaCallModuleFlags* GetXlaCallModuleFlags(); MlirCommonFlags* GetMlirCommonFlags(); void ResetJitCompilerFlags(); const JitRtFlags& GetJitRtFlags();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 14.5K bytes - Viewed (0) -
tensorflow/compiler/jit/flags.cc
return mark_for_compilation_flags; } XlaSparseCoreFlags* GetXlaSparseCoreFlags() { absl::call_once(flags_init, &AllocateAndParseFlags); return sparse_core_flags; } XlaDeviceFlags* GetXlaDeviceFlags() { absl::call_once(flags_init, &AllocateAndParseFlags); return device_flags; } XlaOpsCommonFlags* GetXlaOpsCommonFlags() { absl::call_once(flags_init, &AllocateAndParseFlags); return ops_flags;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 24.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util_test.cc
rollout_config.enabled_for_xla_launch_ = true; rollout_config.enabled_for_compile_on_demand_ = true; // Set flag to enable using XLA devices. PJRT currently is only supported // for XLA devices. GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true; // Add and setup the XLA_CPU device. auto device_type = DeviceType(DEVICE_XLA_CPU); rollout_config.AllowForDeviceInXlaLaunch(device_type);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 28.8K bytes - Viewed (0) -
tensorflow/compiler/jit/mark_for_compilation_pass_test.cc
#include "tensorflow/core/platform/test.h" using ::tensorflow::testing::FindNodeByName; namespace tensorflow { namespace { static bool Initialized = [] { tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true; return true; }(); REGISTER_OP("UncompilableNullary").Output("o: float"); REGISTER_OP("UncompilableUnary").Input("a: float").Output("o: float");
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 14 10:11:10 UTC 2024 - 79.6K bytes - Viewed (0)