Results 1 - 6 of 6 for GPU (0.02 sec)
.bazelrc
# CUDA WHEEL
test:linux_cuda_wheel_test_filters --test_tag_filters=gpu,requires-gpu,-no_gpu,-no_oss,-oss_excluded,-oss_serial,-benchmark-test,-no_cuda11,-no_oss_py38,-no_oss_py39,-no_oss_py310
test:linux_cuda_wheel_test_filters --build_tag_filters=gpu,requires-gpu,-no_gpu,-no_oss,-oss_excluded,-oss_serial,-benchmark-test,-no_cuda11,-no_oss_py38,-no_oss_py39,-no_oss_py310
Last Modified: Mon Oct 28 22:02:31 UTC 2024 - 51.3K bytes
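Under standard Bazel semantics, these two linux_cuda_wheel_test_filters lines are activated by passing --config=linux_cuda_wheel_test_filters to bazel test: the run is then restricted to targets tagged gpu or requires-gpu, while targets tagged no_gpu, no_oss, oss_excluded, benchmark-test, or the listed Python-version exclusions are filtered out.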
tensorflow/c/eager/c_api_test.cc
TFE_Op* matmul = MatMulOp(ctx, m, m);
// Disable the test if no GPU is present.
string gpu_device_name;
if (GetDeviceName(ctx, &gpu_device_name, "GPU")) {
  TFE_OpSetDevice(matmul, "GPU:0", status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  const char* device_name = TFE_OpGetDevice(matmul, status);
  ASSERT_TRUE(strstr(device_name, "GPU:0") != nullptr);
  TFE_OpSetDevice(matmul, "CPU:0", status);
Last Modified: Thu Aug 03 20:50:20 UTC 2023 - 94.6K bytes
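The eager c_api_test.cc excerpt above gates the GPU assertions on a GetDeviceName() helper that is not part of the hit. As a rough sketch only (FindDeviceOfType is an assumed name, not TensorFlow's actual helper), a similar lookup can be built from the public C API's device-list calls:

// Sketch only: lists the devices known to an eager context and returns the
// full name of the first device whose type matches (e.g. "GPU"), so a test
// can bail out early when no such device exists.
#include <cstring>
#include <string>

#include "tensorflow/c/c_api.h"
#include "tensorflow/c/eager/c_api.h"

bool FindDeviceOfType(TFE_Context* ctx, const char* type, std::string* name) {
  TF_Status* status = TF_NewStatus();
  bool found = false;
  TF_DeviceList* devices = TFE_ContextListDevices(ctx, status);
  if (TF_GetCode(status) == TF_OK) {
    for (int i = 0; i < TF_DeviceListCount(devices); ++i) {
      const char* device_type = TF_DeviceListType(devices, i, status);
      if (TF_GetCode(status) == TF_OK && std::strcmp(device_type, type) == 0) {
        *name = TF_DeviceListName(devices, i, status);
        found = (TF_GetCode(status) == TF_OK);
        break;
      }
    }
    TF_DeleteDeviceList(devices);
  }
  TF_DeleteStatus(status);
  return found;
}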
tensorflow/c/c_api_test.cc
TEST(CAPI, Session_Min_GPU) {
  const string gpu_device = GPUDeviceName();
  // Skip this test if no GPU is available.
  if (gpu_device.empty()) return;

  RunMinTest(gpu_device, /*use_XLA=*/false);
}

TEST(CAPI, Session_Min_XLA_GPU) {
  const string gpu_device = GPUDeviceName();
  // Skip this test if no GPU is available.
  if (gpu_device.empty()) return;

  RunMinTest(gpu_device, /*use_XLA=*/true);
}
Last Modified: Sat Oct 12 16:27:48 UTC 2024 - 97K bytes
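Both tests above skip by returning early when GPUDeviceName() comes back empty. A hedged variant, assuming googletest 1.10+ and reusing the file's GPUDeviceName() and RunMinTest() helpers, would report the skip explicitly instead of recording a silent pass:

// Sketch only: same guard as the excerpt, but with GTEST_SKIP(). The test
// name is hypothetical; GPUDeviceName() and RunMinTest() are helpers defined
// elsewhere in c_api_test.cc.
#include <string>

#include <gtest/gtest.h>

TEST(CAPI, Session_Min_GPU_OrSkip) {
  const std::string gpu_device = GPUDeviceName();
  if (gpu_device.empty()) GTEST_SKIP() << "No GPU available";
  RunMinTest(gpu_device, /*use_XLA=*/false);
}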
CHANGELOG/CHANGELOG-1.3.md
* Do not query the metadata server to find out if running on GCE. Retry metadata server query for gcr if running on gce. ([#28871](https://github.com/kubernetes/kubernetes/pull/28871), [@vishh](https://github.com/vishh))
* Fix GPU resource validation ([#28743](https://github.com/kubernetes/kubernetes/pull/28743), [@therc](https://github.com/therc))
Last Modified: Thu Dec 24 02:28:26 UTC 2020 - 84K bytes
tensorflow/BUILD
)

config_setting(
    name = "with_xla_support",
    define_values = {"with_xla_support": "true"},
    visibility = ["//visibility:public"],
)

# By default, XLA GPU is compiled into tensorflow when building with
# --config=cuda even when `with_xla_support` is false. The config setting
# here allows us to override the behavior if needed.
config_setting(
    name = "no_xla_deps_in_cuda",
Last Modified: Wed Oct 16 05:28:35 UTC 2024 - 53.5K bytes
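In Bazel, config_setting targets such as with_xla_support and no_xla_deps_in_cuda are consumed through select() expressions in other rules, so a target's deps, defines, or copts can differ depending on whether the setting matches the flags of the current build.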
tensorflow/c/c_api_function_test.cc
for (auto input : inputs) {
  TF_AddInput(desc, input);
}
// Set device to CPU because some ops inside the function might not be
// available on GPU.
TF_SetDevice(desc, "/cpu:0");
*op = TF_FinishOperation(desc, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
ASSERT_NE(*op, nullptr);
}

FunctionDef fdef() {
Last Modified: Thu Jul 20 22:08:54 UTC 2023 - 63.6K bytes
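The c_api_function_test.cc excerpt wires inputs into an op description, pins it to /cpu:0, and then finalizes it. A minimal, hedged sketch of that same TF_NewOperation → TF_AddInput → TF_SetDevice → TF_FinishOperation sequence follows; the helper name and the choice of an Identity op are assumptions for illustration, not code from the test:

// Sketch only: AddIdentityOnCpu is an assumed helper name. It shows the
// graph-mode C API flow from the excerpt: describe an op, add its input,
// pin it to CPU, then finish it and let the caller check the status.
#include "tensorflow/c/c_api.h"

TF_Operation* AddIdentityOnCpu(TF_Graph* graph, TF_Output input,
                               const char* name, TF_Status* status) {
  TF_OperationDescription* desc = TF_NewOperation(graph, "Identity", name);
  TF_AddInput(desc, input);
  // Pin to CPU for the same reason given in the excerpt: some ops used inside
  // a function may have no GPU kernel.
  TF_SetDevice(desc, "/cpu:0");
  // Returns nullptr and sets `status` if the operation could not be built.
  return TF_FinishOperation(desc, status);
}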