Results 1 - 10 of 12 for Cuda (0.03 sec)
.bazelrc
# release_cpu_linux: Toolchain and CUDA options for Linux CPU builds.
# release_gpu_linux: Toolchain and CUDA options for Linux GPU builds.
# release_cpu_macos: Toolchain and CUDA options for MacOS CPU builds.
# release_cpu_windows: Toolchain and CUDA options for Windows CPU builds.
Last Modified: Mon Oct 28 22:02:31 UTC 2024 - 51.3K bytes
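These named configs are consumed on the bazel command line; a minimal sketch, assuming a GPU-capable Linux machine (the build target is illustrative, not taken from this file):

# Pull in the Linux GPU release config described above (toolchain + CUDA options).
# The target is a placeholder for whatever is being built.
bazel build --config=release_gpu_linux //tensorflow/tools/pip_package:wheel
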
ci/official/utilities/rename_and_verify_wheels.sh
  fi
fi
# VERY basic check to ensure the [and-cuda] package variant is installable.
# Checks TFCI_BAZEL_COMMON_ARGS for "gpu" or "cuda", implying that the test is
# relevant. All of the GPU test machines have CUDA installed via other means,
# so I am not sure how to verify that the dependencies themselves are valid for
# the moment.
if [[ "$TFCI_BAZEL_COMMON_ARGS" =~ gpu|cuda ]]; then
Last Modified: Wed Oct 02 21:18:17 UTC 2024 - 4.3K bytes
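The gated check amounts to resolving the [and-cuda] extra for the freshly built wheel; a rough sketch of such a smoke test with recent pip, where the wheel filename is a placeholder rather than the real CI artifact name:

# Resolve the wheel's [and-cuda] dependency set without installing anything;
# the filename below is a placeholder.
python3 -m pip install --dry-run "tensorflow-2.18.0-cp312-cp312-manylinux_2_17_x86_64.whl[and-cuda]"
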
configure.py
  write_repo_env_to_bazelrc('cuda', env_var, local_path)


def set_other_cuda_vars(environ_cp):
  """Set other CUDA related variables."""
  # If CUDA is enabled, always use GPU during build and test.
  if environ_cp.get('TF_CUDA_CLANG') == '1':
    write_to_bazelrc('build --config=cuda_clang')
  else:
    write_to_bazelrc('build --config=cuda')
Last Modified: Wed Oct 02 22:16:02 UTC 2024 - 48.2K bytes
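Either branch ends up as a single line in the generated bazelrc; a sketch of driving the script non-interactively, assuming it still honors these environment variables (the answers shown are illustrative):

# Hypothetical non-interactive configure run that takes the cuda_clang branch,
# so the generated rc file ends up with `build --config=cuda_clang`.
TF_NEED_CUDA=1 TF_CUDA_CLANG=1 python3 configure.py
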
ci/official/containers/linux_arm64/Dockerfile
# Install devtoolset devel dependencies
COPY setup.sources.sh /setup.sources.sh
COPY setup.packages.sh /setup.packages.sh
COPY devel.packages.txt /devel.packages.txt
COPY cuda.packages.txt /cuda.packages.txt
RUN /setup.sources.sh && /setup.packages.sh /devel.packages.txt

# Install various tools.
# - bats: bash unit testing framework
Last Modified: Wed Oct 09 23:55:37 UTC 2024 - 4.4K bytes
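A sketch of building this image locally, assuming Docker with arm64 support; the tag is a placeholder, and CI uses the prebuilt image referenced in the env file further down instead:

# Build the arm64 devel container from the repo root; tag is illustrative.
docker build \
  -t tf-build-arm64:local \
  -f ci/official/containers/linux_arm64/Dockerfile \
  ci/official/containers/linux_arm64
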
ci/official/utilities/code_check_full.bats
    done < $BATS_TEST_TMPDIR/missing_deps
    exit 1
  fi
}

# The Python package is not allowed to depend on any CUDA packages.
@test "Pip package doesn't depend on CUDA" {
  bazel cquery \
    --experimental_cc_shared_library \
    --@local_config_cuda//:enable_cuda \
    --@local_config_cuda//cuda:include_cuda_libs=false \
Last Modified: Wed Oct 23 18:48:35 UTC 2024 - 13.6K bytes
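The cquery in the snippet is truncated; a standalone sketch of the same idea, where the deps expression and the target are assumptions rather than quotes from the test:

# List transitive deps of the pip package with CUDA enabled but CUDA libs
# excluded, then fail if anything under @local_config_cuda shows up.
bazel cquery \
  --experimental_cc_shared_library \
  --@local_config_cuda//:enable_cuda \
  --@local_config_cuda//cuda:include_cuda_libs=false \
  "deps(//tensorflow/tools/pip_package:wheel)" \
  | grep "@local_config_cuda//" \
  && echo "FAIL: wheel depends on CUDA packages" \
  || echo "OK: no CUDA dependencies"
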
WORKSPACE
tf_workspace0()

load(
    "@local_tsl//third_party/gpus/cuda/hermetic:cuda_json_init_repository.bzl",
    "cuda_json_init_repository",
)

cuda_json_init_repository()

load(
    "@cuda_redist_json//:distributions.bzl",
    "CUDA_REDISTRIBUTIONS",
    "CUDNN_REDISTRIBUTIONS",
)

load(
    "@local_tsl//third_party/gpus/cuda/hermetic:cuda_redist_init_repositories.bzl",
    "cuda_redist_init_repositories",
Last Modified: Fri Oct 11 16:49:28 UTC 2024 - 3K bytes
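These calls wire hermetic CUDA and cuDNN redistributables into the workspace; version pinning is normally done through repo environment variables. A sketch, where the variable names and versions are assumptions based on the hermetic CUDA tooling rather than anything in this WORKSPACE:

# Pin hermetic CUDA/cuDNN versions for a GPU build; names and versions are
# assumptions, check the hermetic CUDA docs in the repository.
bazel build --config=cuda \
  --repo_env=HERMETIC_CUDA_VERSION=12.3.1 \
  --repo_env=HERMETIC_CUDNN_VERSION=8.9.7.29 \
  //tensorflow/tools/pip_package:wheel
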
CONTRIBUTING.md
flag.

```bash
export flags="--config=opt --config=cuda -k"
```

* For TensorFlow versions prior v.2.18.0: Add CUDA paths to LD_LIBRARY_PATH and
  add the `cuda` option flag.

```bash
export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$LD_LIBRARY_PATH"
Last Modified: Wed Oct 23 06:20:12 UTC 2024 - 15.9K bytes
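The exported $flags string is meant to be fed to bazel test later in the contributing guide; a sketch of that usage with an illustrative test target:

# Run a GPU-enabled unit test with the flags exported above; the target is a
# placeholder, pick whichever test you are iterating on.
export flags="--config=opt --config=cuda -k"
bazel test ${flags} //tensorflow/python/kernel_tests/random:random_ops_test
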
tensorflow/BUILD
# Config setting that is satisfied when TensorFlow is being built with CUDA
# support through e.g. `--config=cuda` (or `--config=cuda_clang` in OSS).
alias(
    name = "is_cuda_enabled",
    actual = if_oss(
        "@local_config_cuda//:is_cuda_enabled",
        "@local_config_cuda//cuda:using_config_cuda",
    ),
)

# Config setting that is satisfied when CUDA device code should be compiled
Last Modified: Wed Oct 16 05:28:35 UTC 2024 - 53.5K bytes
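Downstream BUILD rules select() on this alias; from the command line it is satisfied simply by building with the flag the comment names. A sketch of inspecting how the alias resolves, assuming --config=cuda is usable in the checkout:

# Show what :is_cuda_enabled points at when the CUDA config is active (OSS path).
bazel cquery --config=cuda //tensorflow:is_cuda_enabled --output=build
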
ci/official/wheel.sh
# limitations under the License.
# ==============================================================================
source "${BASH_SOURCE%/*}/utilities/setup.sh"

# Record GPU count and CUDA version status
if [[ "$TFCI_NVIDIA_SMI_ENABLE" == 1 ]]; then
  tfrun nvidia-smi
fi

# Update the version numbers for Nightly only
if [[ "$TFCI_NIGHTLY_UPDATE_VERSION_ENABLE" == 1 ]]; then
Last Modified: Mon Oct 14 23:45:36 UTC 2024 - 2.2K bytes
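In CI the TFCI_* toggles come from the env files under ci/official/envs; a minimal sketch of a hand-driven run, assuming setup.sh can find whatever else it needs in your environment:

# Record GPU status and skip the nightly version bump; exported by hand here
# only for illustration.
export TFCI_NVIDIA_SMI_ENABLE=1
export TFCI_NIGHTLY_UPDATE_VERSION_ENABLE=0
bash ci/official/wheel.sh
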
ci/official/envs/linux_arm64
TFCI_BAZEL_TARGET_SELECTING_CONFIG_PREFIX=linux_arm64
# Note: this is not set to "--cpu", because that changes the package name
# to tensorflow_cpu. These ARM builds are supposed to have the name "tensorflow"
# despite lacking Nvidia CUDA support.
TFCI_BUILD_PIP_PACKAGE_ARGS="--repo_env=WHEEL_NAME=tensorflow"
TFCI_DOCKER_ENABLE=1
TFCI_DOCKER_IMAGE=us-central1-docker.pkg.dev/tensorflow-sigs/tensorflow/build-arm64:tf-2-18-multi-python
TFCI_DOCKER_PULL_ENABLE=1
Last Modified: Mon Oct 14 23:45:36 UTC 2024 - 1.5K bytes
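These env files are plain KEY=value fragments; a sketch of loading one by hand (CI normally does this through ci/official/utilities/setup.sh):

# Export every assignment in the env file so child processes (docker, bazel)
# see the TFCI_* variables, then check one of them.
set -a
source ci/official/envs/linux_arm64
set +a
echo "$TFCI_DOCKER_IMAGE"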