Results 1 - 10 of 22 for cuda (2.33 sec)

  1. ci/official/containers/linux_arm64/cuda.packages.txt

    libcudnn8=8.9.6.50-1+cuda12.2
    libcudnn8-dev=8.9.6.50-1+cuda12.2
    
    # This can be removed once NVIDIA publishes a cuda-12.3.2 Docker image.
    # For now it ensures that we install at least version 12.3.107 of PTXAS,
    # since 12.3.103 has a bug.
    - Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Mon Jan 08 09:32:19 GMT 2024
    - 368 bytes
    - Viewed (1)
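    The file above pins cuDNN package versions for the arm64 CUDA container. As a rough illustration only, a pin list like this can be fed to apt after stripping comment lines; the pipeline below is an assumption about usage, not taken from the repository's Dockerfile:

    ```bash
    # Hedged sketch: install the pinned packages, ignoring comment lines.
    # How the repository's Dockerfile actually consumes this file may differ.
    sed 's/#.*//' cuda.packages.txt | xargs apt-get install -y
    ```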
  2. tensorflow/c/BUILD

    load(
        "//tensorflow:tensorflow.bzl",
        "check_deps",
        "if_google",
        "if_not_mobile",
        "tf_cc_test",
        "tf_copts",
        "tf_cuda_library",
        "tf_custom_op_library",
        "tf_kernel_library",
    )
    load("//tensorflow:tensorflow.default.bzl", "filegroup", "tf_cuda_cc_test")
    load(
        "//tensorflow/core/tpu:build_defs.bzl",
        "if_libtpu_tf_status",
    )
    
    package(
    - Plain Text
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Mar 27 18:00:18 GMT 2024
    - 30.3K bytes
    - Viewed (0)
  3. ci/official/README.md

    #    build. This should also match the system you're using--you cannot build
    #    the TF MacOS package from Linux.
    #      Ex. linux_x86        -- x86_64 Linux platform
    #      Ex. linux_x86_cuda   -- x86_64 Linux platform, with Nvidia CUDA support
    #      Ex. macos_arm64      -- arm64 MacOS platform
    # 3. Add modifiers. Some modifiers for local execution are:
    #      Ex. disk_cache -- Use a local cache
    - Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Thu Feb 01 03:21:19 GMT 2024
    - 8K bytes
    - Viewed (0)
  4. CONTRIBUTING.md

        export flags="--config=opt -k"
        ```
    
        If the tests are to be run on the GPU, add CUDA paths to LD_LIBRARY_PATH and
        add the `cuda` option flag
    
        ```bash
        export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64"
        export flags="--config=opt --config=cuda -k"
        ```
    
        For example, to run all tests under tensorflow/python, do:
    
        ```bash
    - Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Thu Mar 21 11:45:51 GMT 2024
    - 15.6K bytes
    - Viewed (0)
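    The excerpt is cut off just before the example command; a minimal sketch of the intended invocation, reusing the `flags` variable exported above (the target pattern is illustrative):

    ```bash
    # Hedged sketch: run all tests under tensorflow/python with the exported flags.
    bazel test ${flags} //tensorflow/python/...
    ```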
  5. tensorflow/BUILD

    # Config setting that is satisfied when TensorFlow is being built with CUDA
    # support through e.g. `--config=cuda` (or `--config=cuda_clang` in OSS).
    alias(
        name = "is_cuda_enabled",
        actual = if_oss(
            "@local_config_cuda//:is_cuda_enabled",
            "@local_config_cuda//cuda:using_clang",
        ),
    )
    
    # Config setting that is satisfied when CUDA device code should be compiled
    - Plain Text
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Apr 09 18:15:11 GMT 2024
    - 53.4K bytes
    - Viewed (8)
  6. tensorflow/c/eager/BUILD

    load(
        "//tensorflow:tensorflow.bzl",
        "tf_cc_test",
        "tf_copts",
        "tf_cuda_cc_test",
        "tf_cuda_library",
    )
    load("//tensorflow:tensorflow.default.bzl", "cc_header_only_library", "filegroup")
    load(
        "//tensorflow/core/platform:build_config_root.bzl",
        "tf_cuda_tests_tags",
    )
    load("//tensorflow/core/platform:rules_cc.bzl", "cc_library")
    
    package(
    - Plain Text
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Apr 11 23:52:39 GMT 2024
    - 33.3K bytes
    - Viewed (0)
  7. ci/official/envs/linux_x86_cuda

    source ci/official/envs/linux_x86
    TFCI_BAZEL_COMMON_ARGS="--repo_env=TF_PYTHON_VERSION=$TFCI_PYTHON_VERSION --config release_gpu_linux"
    TFCI_BAZEL_TARGET_SELECTING_CONFIG_PREFIX=linux_cuda
    TFCI_BUILD_PIP_PACKAGE_ARGS="--repo_env=WHEEL_NAME=tensorflow"
    TFCI_DOCKER_ARGS="--gpus all"
    TFCI_LIB_SUFFIX="-gpu-linux-x86_64"
    - Plain Text
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Jan 19 00:24:30 GMT 2024
    - 1K bytes
    - Viewed (0)
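    As a rough sketch of how an env file like this might be used outside CI, assuming TFCI_PYTHON_VERSION is set beforehand and treating the bazel invocation as illustrative rather than the project's actual entry point:

    ```bash
    # Hedged sketch: source the CUDA env file and reuse its variables in a build.
    export TFCI_PYTHON_VERSION=3.11
    source ci/official/envs/linux_x86_cuda
    bazel build $TFCI_BAZEL_COMMON_ARGS //tensorflow/tools/pip_package:wheel
    ```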
  8. RELEASE.md

    *   Move `layers_dense_variational_impl.py` to `layers_dense_variational.py`.
    
    ## Known Bugs
    
    *   Using XLA:GPU with CUDA 9 and CUDA 9.1 results in garbage results and/or
        `CUDA_ILLEGAL_ADDRESS` failures.
    
        Google discovered in mid-December 2017 that the PTX-to-SASS compiler in CUDA
        9 and CUDA 9.1 sometimes does not properly compute the carry bit when
    - Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Mon Apr 29 19:17:57 GMT 2024
    - 727.7K bytes
    - Viewed (8)
  9. ci/official/utilities/code_check_full.bats

        --@local_config_cuda//:enable_cuda \
        --define framework_shared_object=false \
        "somepath(//tensorflow/tools/pip_package:wheel, " \
        "@local_config_cuda//cuda:cudart + "\
        "@local_config_cuda//cuda:cudart + "\
        "@local_config_cuda//cuda:cuda_driver + "\
        "@local_config_cuda//cuda:cudnn + "\
        "@local_config_cuda//cuda:curand + "\
        "@local_config_cuda//cuda:cusolver + "\
    - Plain Text
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Mar 06 21:54:13 GMT 2024
    - 13.2K bytes
    - Viewed (0)
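    The check above walks the dependency graph of the pip wheel looking for CUDA targets. A standalone approximation of the same query, using only flags and labels that appear in the excerpt (the real bats test combines several dependencies):

    ```bash
    # Hedged sketch: ask whether the pip wheel depends on the CUDA runtime.
    bazel cquery --@local_config_cuda//:enable_cuda \
      "somepath(//tensorflow/tools/pip_package:wheel, @local_config_cuda//cuda:cudart)"
    ```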
  10. .bazelrc

    build:mkl_aarch64_threadpool -c opt
    
    # CUDA: This config refers to building CUDA op kernels with nvcc.
    build:cuda --repo_env TF_NEED_CUDA=1
    build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
    build:cuda --@local_config_cuda//:enable_cuda
    
    # CUDA: This config refers to building CUDA op kernels with clang.
    build:cuda_clang --config=cuda
    # Enable TensorRT optimizations https://developer.nvidia.com/tensorrt
    - Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Thu May 02 19:34:20 GMT 2024
    - 52.8K bytes
    - Viewed (2)
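    The `.bazelrc` excerpt defines the `cuda` and `cuda_clang` configs; activating one on the command line looks roughly like the following (the target is illustrative, taken from the pip_package result above):

    ```bash
    # Hedged sketch: build with the CUDA config defined in .bazelrc.
    bazel build --config=cuda_clang //tensorflow/tools/pip_package:wheel
    ```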