Results 1 - 6 of 6 for even (0.59 sec)

  1. ci/official/README.md

    #      compilers, etc. can cause undefined behavior such as build failures
    #      or tests passing incorrectly.
    #    - Automatic LLVM updates are known to extend build time even with
    #      the cache; this is unavoidable.
    export TFCI=py311,linux_x86,public_cache,disk_cache
    
    # Recommended: Configure Docker. (Linux only)
    #
    #   TF uses hub.docker.com/r/tensorflow/build containers for CI,
    Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Thu Feb 01 03:21:19 GMT 2024
    - 8K bytes
    - Viewed (0)
  2. tensorflow/c/eager/c_api_experimental.h

        int64_t init_timeout_in_ms, int retries, TF_Status* status,
        bool clear_existing_contexts);
    
    // Checks whether a remote worker is alive or not. This will return true even if
    // the context doesn't exist on the remote worker.
    TF_CAPI_EXPORT extern bool TFE_ContextCheckAlive(TFE_Context* ctx,
                                                     const char* worker_name,
    C
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Feb 21 22:37:46 GMT 2024
    - 39.5K bytes
    - Viewed (0)
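
    A minimal C++ sketch of how TFE_ContextCheckAlive from the snippet above might be
    called, assuming the program is linked against the TensorFlow C API and that the
    eager context's cluster has already been configured (for example via
    TFE_ContextSetServerDef); the worker name used here is only a placeholder.

    #include <cstdio>

    #include "tensorflow/c/eager/c_api.h"
    #include "tensorflow/c/eager/c_api_experimental.h"
    #include "tensorflow/c/tf_status.h"

    int main() {
      TF_Status* status = TF_NewStatus();
      TFE_ContextOptions* opts = TFE_NewContextOptions();
      TFE_Context* ctx = TFE_NewContext(opts, status);
      TFE_DeleteContextOptions(opts);

      // In a real setup the cluster would be configured first, e.g. with
      // TFE_ContextSetServerDef(); without that the query is not meaningful.
      // "/job:worker/replica:0/task:1" is a placeholder worker name.
      bool alive =
          TFE_ContextCheckAlive(ctx, "/job:worker/replica:0/task:1", status);
      if (TF_GetCode(status) != TF_OK) {
        std::fprintf(stderr, "CheckAlive failed: %s\n", TF_Message(status));
      } else {
        std::printf("worker alive: %s\n", alive ? "true" : "false");
      }

      TFE_DeleteContext(ctx);
      TF_DeleteStatus(status);
      return 0;
    }

    As the header comment notes, the call reports whether the remote worker responds;
    it returns true even if the context itself has not been created on that worker.
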
  3. RELEASE.md

        *   Parameterless `tf.function`s are assumed to have an empty `input_signature` instead of an undefined one even if the `input_signature` is unspecified.
        *   `tf.types.experimental.TraceType` now requires an additional `placeholder_value` method to be defined.
    Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Mon Apr 29 19:17:57 GMT 2024
    - 727.7K bytes
    - Viewed (8)
  4. tensorflow/BUILD

    config_setting(
        name = "with_xla_support",
        define_values = {"with_xla_support": "true"},
        visibility = ["//visibility:public"],
    )
    
    # By default, XLA GPU is compiled into tensorflow when building with
    # --config=cuda even when `with_xla_support` is false. The config setting
    # here allows us to override the behavior if needed.
    config_setting(
        name = "no_xla_deps_in_cuda",
        define_values = {"no_xla_deps_in_cuda": "true"},
    Plain Text
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Apr 09 18:15:11 GMT 2024
    - 53.4K bytes
    - Viewed (8)
  5. ci/official/containers/linux_arm64/devel.usertools/wheel_verification.bats

            *manylinux*)     LARGEST_OK_SIZE=580 ;;
            # Unknown:
            *)
                echo "The wheel's name is in an unknown format."
                exit 1
                ;;
        esac
        # >&3 forces output in bats even if the test passes. See
        # https://bats-core.readthedocs.io/en/stable/writing-tests.html#printing-to-the-terminal
        echo "# Size of $TF_WHEEL is $WHEEL_MEGABYTES / $LARGEST_OK_SIZE megabytes." >&3
    Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Tue Jan 23 02:14:00 GMT 2024
    - 2.7K bytes
    - Viewed (0)
  6. tensorflow/c/eager/parallel_device/parallel_device_lib.cc

           ++device_index) {
        DeviceThread* device_thread = device_threads_[device_index].get();
        per_device_output_tensors.push_back(device_thread->Join(status));
        // We will run every Join even if there are bad statuses in case the user
        // wants to recover and continue running ops on the parallel device (which
        // would otherwise deadlock).
        if (TF_GetCode(status) != TF_OK &&
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Feb 09 07:47:20 GMT 2024
    - 25.4K bytes
    - Viewed (1)
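
    The comment in this snippet describes a general pattern: join every worker thread
    before acting on a bad status, so no thread is left blocked. A generic C++ sketch
    of that idea (not the internal DeviceThread API) could look like this:

    #include <cstddef>
    #include <functional>
    #include <string>
    #include <thread>
    #include <vector>

    struct Status {
      bool ok = true;
      std::string message;
    };

    // Runs one task per worker and joins *every* thread before reporting the
    // first failure, so an early error never leaves another worker un-joined.
    Status RunAll(const std::vector<std::function<Status()>>& tasks) {
      std::vector<Status> results(tasks.size());
      std::vector<std::thread> threads;
      threads.reserve(tasks.size());
      for (std::size_t i = 0; i < tasks.size(); ++i) {
        threads.emplace_back([&, i] { results[i] = tasks[i](); });
      }
      for (auto& t : threads) t.join();  // join all, even after failures
      for (const auto& r : results) {
        if (!r.ok) return r;  // surface the first bad status afterwards
      }
      return Status{};
    }

    Deferring error handling until after all joins mirrors the rationale in the
    snippet: the caller may want to keep using the parallel device, which would
    otherwise deadlock on an un-joined thread.
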