Results 11 - 20 of 27 for isCall (0.22 sec)

  1. tensorflow/c/eager/c_api_experimental.h

      // to pin to the physical device.
      //
      // This function is guaranteed to be called only when all of the custom-device
      // inputs are on this device.
      bool (*shall_pin_to_this_device)(const TFE_Op* op, TF_Status* s) = nullptr;
    } TFE_CustomDevice;
    
    // Registers a custom device for use with eager execution.
    //
    // Eager operations may be placed on this device, e.g.  `with
    C
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Feb 21 22:37:46 GMT 2024
    - 39.5K bytes
    - Viewed (0)
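    The header excerpt above is from the TFE_CustomDevice struct, whose
    shall_pin_to_this_device callback lets a custom device decide whether an op
    whose custom-device inputs all live on it should be pinned there. Below is a
    minimal, hedged sketch of such a callback in C++; the names ShallPinToMyDevice
    and MakeMyDevice are illustrative, and the other TFE_CustomDevice callbacks a
    real device must provide (copy, execute, delete) are omitted.

      #include "tensorflow/c/eager/c_api_experimental.h"
      #include "tensorflow/c/tf_status.h"

      // Hypothetical pinning callback: accept every op as long as no error has
      // been recorded on the status. A real device would inspect `op` (its name,
      // attributes, or inputs) before deciding.
      bool ShallPinToMyDevice(const TFE_Op* op, TF_Status* s) {
        (void)op;
        return TF_GetCode(s) == TF_OK;
      }

      // Assemble a TFE_CustomDevice with only the pinning hook filled in; the
      // remaining callbacks are intentionally left at their defaults here.
      TFE_CustomDevice MakeMyDevice() {
        TFE_CustomDevice device = {};
        device.shall_pin_to_this_device = &ShallPinToMyDevice;
        return device;
      }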
  2. ci/official/utilities/rename_and_verify_wheels.sh

      echo '(search for TFCI_WHL_SIZE_LIMIT to change it)'
      ls -sh *.whl
      exit 2
    fi
    
    # Quick install checks
    venv=$(mktemp -d)
    "python${TFCI_PYTHON_VERSION}" -m venv "$venv"
    python="$venv/bin/python3"
    "$python" -m pip install *.whl $TFCI_PYTHON_VERIFY_PIP_INSTALL_ARGS
    if [[ "$TFCI_WHL_IMPORT_TEST_ENABLE" == "1" ]]; then
    Shell Script
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Mar 27 21:16:27 GMT 2024
    - 3.4K bytes
    - Viewed (0)
  3. ci/official/requirements_updater/requirements.in

    h5py >= 3.10.0
    lit ~= 17.0.2
    opt_einsum == 3.3.0
    astunparse == 1.6.3
    dill == 0.3.7
    astor == 0.7.1
    typing_extensions == 4.8.0
    gast == 0.4.0
    termcolor == 2.3.0
    wrapt == 1.16.0
    tblib == 2.0.0
    
    # Install tensorboard, and keras
    # Note that here we want the latest version that matches TF major.minor version
    # Note that we must use nightly here as these are used in nightly jobs
    # For release jobs, we will pin these on the release branch
    Plain Text
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 07:17:18 GMT 2024
    - 806 bytes
    - Viewed (0)
  4. tensorflow/c/eager/BUILD

            "//tensorflow/core/platform:status",
            "@com_google_absl//absl/container:flat_hash_set",
        ],
    )
    
    tf_cuda_cc_test(
        name = "gradients_test",
        size = "small",
        srcs = [
            "gradients_test.cc",
        ],
        args = ["--heap_check="],
        tags = tf_cuda_tests_tags() + ["nomac"],
        deps = [
            ":abstract_context",
            ":abstract_tensor_handle",
    Plain Text
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Apr 11 23:52:39 GMT 2024
    - 33.3K bytes
    - Viewed (0)
  5. RELEASE.md

        no longer include NCCL in the binary install. TensorFlow usage with multiple
        GPUs and NCCL requires upgrade to
        [NCCL 2.2](https://developer.nvidia.com/nccl). See updated install guides:
        [TensorFlow GPU support](https://www.tensorflow.org/install/gpu) and
        [Build TensorFlow from source](https://www.tensorflow.org/install/source).
    Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Mon Apr 29 19:17:57 GMT 2024
    - 727.7K bytes
    - Viewed (8)
  6. tensorflow/c/eager/c_api.cc

        TF_Status status;
        // Let this custom device choose the device to pin this op on if it
        // implements the pinning function.
        if (device_.shall_pin_to_this_device != nullptr) {
          return device_.shall_pin_to_this_device(tensorflow::wrap(op), &status);
        }
        return errors::Unimplemented("No custom device pinning implementation.");
      }
    
     private:
      TFE_Context* context_;
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Mar 12 20:00:09 GMT 2024
    - 43.9K bytes
    - Viewed (2)
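    The c_api.cc excerpt above shows the dispatch side: the eager runtime forwards
    the pinning decision to the registered custom device only when
    shall_pin_to_this_device is set, and otherwise reports Unimplemented. A hedged
    sketch of registering such a device through TFE_RegisterCustomDevice (declared
    in c_api_experimental.h) follows; the device name string, the nullptr
    device_info, and the RegisterMyDevice helper are assumptions for illustration.

      #include "tensorflow/c/eager/c_api.h"
      #include "tensorflow/c/eager/c_api_experimental.h"
      #include "tensorflow/c/tf_status.h"

      // Defined in the sketch under result 1: a TFE_CustomDevice with only the
      // shall_pin_to_this_device callback populated.
      TFE_CustomDevice MakeMyDevice();

      void RegisterMyDevice(TFE_Context* ctx) {
        TF_Status* status = TF_NewStatus();
        // The "device:CUSTOM:0" suffix is an assumed example name; any unused,
        // well-formed device name should work.
        TFE_RegisterCustomDevice(ctx, MakeMyDevice(),
                                 "/job:localhost/replica:0/task:0/device:CUSTOM:0",
                                 /*device_info=*/nullptr, status);
        // Registration can fail (e.g. if the name is already taken), so check
        // the status before using the device.
        if (TF_GetCode(status) != TF_OK) {
          // Handle or report TF_Message(status) as appropriate.
        }
        TF_DeleteStatus(status);
      }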
  7. tensorflow/BUILD

            "tf_private_symbols.lds",
        ],
    )
    
    genrule(
        name = "install_headers",
        srcs = [
            "//tensorflow/c:headers",
            "//tensorflow/c/eager:headers",
            "//tensorflow/cc:headers",
            "//tensorflow/core:headers",
            "@local_xla//xla/stream_executor:stream_executor_install_hdrs",
        ],
        outs = ["include"],
        cmd = """
        mkdir $@
    Plain Text
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Apr 09 18:15:11 GMT 2024
    - 53.4K bytes
    - Viewed (8)
  8. .bazelrc

    # strongly recommend that you migrate to Clang as your compiler for TensorFlow
    # Linux builds. Instructions are available in the official documentation:
    # https://www.tensorflow.org/install/source#install_clang_recommended_linux_only
    # Another good option is to use our Docker containers to build and test TF:
    # https://github.com/tensorflow/tensorflow/tree/master/tensorflow/tools/tf_sig_build_dockerfiles.
    Plain Text
    - Registered: Tue May 07 12:40:20 GMT 2024
    - Last Modified: Thu May 02 19:34:20 GMT 2024
    - 52.8K bytes
    - Viewed (2)
  9. configure.py

                # Items below are for backwards compatibility when not using
                # TF_CUDA_PATHS.
                'CUDA_TOOLKIT_PATH',
                'CUDNN_INSTALL_PATH',
                'NCCL_INSTALL_PATH',
                'NCCL_HDR_PATH',
                'TENSORRT_INSTALL_PATH'
            ]
            # Note: set_action_env_var above already writes to bazelrc.
            for name in cuda_env_names:
              if name in environ_cp:
    Python
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 18:25:36 GMT 2024
    - 53.8K bytes
    - Viewed (1)
  10. ci/official/envs/macos_arm64

    TFCI_WHL_BAZEL_TEST_ENABLE=1
    TFCI_WHL_SIZE_LIMIT=240M
    TFCI_WHL_SIZE_LIMIT_ENABLE=1
    
    # 3.11 is the system python on our images
    case $TFCI_PYTHON_VERSION in
    3.11)
      TFCI_MACOS_PYENV_INSTALL_ENABLE=0
      ;;
    *)
      TFCI_MACOS_PYENV_INSTALL_ENABLE=1
      ;;
    Plain Text
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Jan 19 00:24:30 GMT 2024
    - 1.3K bytes
    - Viewed (0)