Results 1 - 10 of 27 for Account (0.18 sec)

  1. ci/official/utilities/setup_macos.sh

      pip install twine==3.6.0
    fi
    
    # Scheduled nightly and release builds upload build artifacts (Pip packages,
    # Libtensorflow archives) to GCS buckets. TFCI Mac VMs need to authenticate as
    # a service account that has the right permissions to be able to do so.
    set +x
    if [[ -n "${GOOGLE_APPLICATION_CREDENTIALS:-}" ]]; then
      # Python 3.12 removed the module `imp` which is needed by gcloud CLI so we set
    Shell Script
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Thu Feb 15 15:23:28 GMT 2024
    - 6.2K bytes
    - Viewed (0)
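    The snippet above cuts off just as setup_macos.sh starts working around gcloud's reliance on the `imp` module that Python 3.12 removed. The following is only a hedged sketch of what such a workaround and the service-account activation typically look like, not the verbatim continuation of the file; the choice of `python3.11` is an illustrative assumption.

    ```
    # Hypothetical sketch, not the actual continuation of setup_macos.sh.
    set +x  # avoid echoing the credential path into build logs
    if [[ -n "${GOOGLE_APPLICATION_CREDENTIALS:-}" ]]; then
      # CLOUDSDK_PYTHON is the variable gcloud reads to pick its interpreter;
      # pointing it at an older Python sidesteps the missing `imp` module.
      # python3.11 here is an assumed, illustrative choice.
      export CLOUDSDK_PYTHON="$(which python3.11)"
      gcloud auth activate-service-account \
        --key-file="$GOOGLE_APPLICATION_CREDENTIALS"
    fi
    set -x
    ```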
  2. ci/official/requirements_updater/README.md

          )
       ```
    
    4) Add the version to `SUPPORTED_VERSIONS` in `updater.sh`, and
       `release_updater.sh`
    
    5) Run the `updater.sh` shell script. \
       If the base requirements file hasn't yet been updated to account for the new
       Python version (which will require different versions for at least some
       dependencies), update it now so that the script can run successfully.
    
    Plain Text
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Tue Jan 23 02:14:00 GMT 2024
    - 3.9K bytes
    - Viewed (0)
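    As a rough illustration of steps 4 and 5 from this README excerpt, the sketch below assumes a hypothetical new Python version and a simple list-style `SUPPORTED_VERSIONS` variable; the exact layout inside `updater.sh` and `release_updater.sh` may differ.

    ```
    # Hypothetical illustration of steps 4-5, under assumed file layouts.

    # 4) In updater.sh (and release_updater.sh), extend the supported list,
    #    e.g. something along the lines of:
    #    SUPPORTED_VERSIONS=("3.9" "3.10" "3.11" "3.12" "3.13")

    # 5) Then regenerate the per-version requirements lock files:
    cd ci/official/requirements_updater
    bash updater.sh
    ```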
  3. ci/official/envs/versions_upload

    # pretend the path is a directory.
    TFCI_ARTIFACT_FINAL_GCS_ENABLE=1
    TFCI_ARTIFACT_FINAL_GCS_SA_PATH="${KOKORO_KEYSTORE_DIR}/73361_tensorflow_release_binary_uploader_service_account"
    TFCI_ARTIFACT_FINAL_GCS_URI="gs://tensorflow/versions/"
    TFCI_ARTIFACT_FINAL_PYPI_ARGS="--config-file=$KOKORO_KEYSTORE_DIR/73361_tensorflow_pypirc_using_global_api_token --repository pypi-warehouse"
    TFCI_ARTIFACT_FINAL_PYPI_ENABLE=1
    Plain Text
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Fri Jan 19 19:07:48 GMT 2024
    - 1.6K bytes
    - Viewed (0)
  4. .github/workflows/arm-cd.yml

            shell: bash
    Others
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Tue Mar 05 10:24:16 GMT 2024
    - 3K bytes
    - Viewed (1)
  5. ci/official/envs/nightly_upload

    # pretend the path is a directory.
    # 1. Upload nightlies
    TFCI_ARTIFACT_FINAL_GCS_ENABLE=1
    TFCI_ARTIFACT_FINAL_GCS_SA_PATH="${KOKORO_KEYSTORE_DIR}/73361_tensorflow_release_binary_uploader_service_account"
    TFCI_ARTIFACT_FINAL_GCS_URI="gs://tensorflow/nightly/"
    TFCI_ARTIFACT_FINAL_PYPI_ARGS="--config-file=$KOKORO_KEYSTORE_DIR/73361_tensorflow_pypirc_using_global_api_token --repository pypi-warehouse"
    TFCI_ARTIFACT_FINAL_PYPI_ENABLE=1
    Plain Text
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Fri Jan 19 19:07:48 GMT 2024
    - 1.7K bytes
    - Viewed (0)
  6. ci/official/upload.sh

    # gs://tensorflow/nightly/2.16.0-dev20240105 (nightly), overwriting previous values.
    if [[ "$TFCI_ARTIFACT_FINAL_GCS_ENABLE" == 1 ]]; then
      gcloud auth activate-service-account --key-file="$TFCI_ARTIFACT_FINAL_GCS_SA_PATH"
    
      # $TF_VER_FULL will resolve to e.g. "2.15.0-rc2". Since $TF_VER_FULL comes
      # from get_versions.sh, which must be run *after* update_version.py, FINAL_URI
    Shell Script
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Wed Jan 24 20:52:12 GMT 2024
    - 2.8K bytes
    - Viewed (0)
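    The upload.sh excerpt stops right after the service-account activation. Combining it with the `TFCI_ARTIFACT_FINAL_*` variables from results 3 and 5, the steps that typically follow would look roughly like the sketch below; `build_output/` is an illustrative local directory, not a path taken from upload.sh.

    ```
    # Hypothetical sketch of the upload steps following the activation above.
    FINAL_URI="${TFCI_ARTIFACT_FINAL_GCS_URI}${TF_VER_FULL}"

    # Copy the built artifacts under a per-version prefix in the GCS bucket.
    gsutil cp -r build_output/ "$FINAL_URI"

    # Push wheels to PyPI using the pypirc and repository name supplied via
    # TFCI_ARTIFACT_FINAL_PYPI_ARGS (e.g. --repository pypi-warehouse).
    if [[ "$TFCI_ARTIFACT_FINAL_PYPI_ENABLE" == 1 ]]; then
      twine upload $TFCI_ARTIFACT_FINAL_PYPI_ARGS build_output/*.whl
    fi
    ```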
  7. tensorflow/c/eager/tape.h

              }
            }
          }
          auto usage_count_it = state.tensor_usage_counts.find(id);
          if (usage_count_it == state.tensor_usage_counts.end()) {
            VLOG(1) << "Tensor " << id << " not used";
            continue;
          }
          usage_count_it->second--;
          if (usage_count_it->second > 0) {
            VLOG(1) << "Tensor " << id << " usage count " << usage_count_it->second;
            continue;
          }
    C
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Tue Apr 02 12:40:29 GMT 2024
    - 47.2K bytes
    - Viewed (1)
  8. configure.py

      # ROCm / CUDA are mutually exclusive.
      # At most 1 GPU platform can be configured.
      gpu_platform_count = 0
      if environ_cp.get('TF_NEED_ROCM') == '1':
        gpu_platform_count += 1
      if environ_cp.get('TF_NEED_CUDA') == '1':
        gpu_platform_count += 1
      if gpu_platform_count >= 2:
    raise UserInputError('CUDA / ROCm are mutually exclusive. '
    Python
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 18:25:36 GMT 2024
    - 53.8K bytes
    - Viewed (0)
  9. tensorflow/c/c_api_experimental.h

    // a) ConfigProto.optimizer_options.global_jit_level is set to ON_1 if
    // `enable_xla_compilation` is non-zero, and OFF otherwise.
    // b) ConfigProto.gpu_options.allow_growth is set to `gpu_memory_allow_growth`.
    // c) ConfigProto.device_count is set to `num_cpu_devices`.
    TF_CAPI_EXPORT extern TF_Buffer* TF_CreateConfig(
        unsigned char enable_xla_compilation, unsigned char gpu_memory_allow_growth,
        unsigned int num_cpu_devices);
    
    C
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Thu Apr 27 21:07:00 GMT 2023
    - 15.1K bytes
    - Viewed (0)
  10. tensorflow/c/c_api_experimental.cc

      }
    
      auto* gpu_options = config.mutable_gpu_options();
      gpu_options->set_allow_growth(gpu_memory_allow_growth);
    
      (*config.mutable_device_count())["CPU"] = num_cpu_devices;
    
      // TODO(b/113217601): This is needed for EagerContext::runner_ to use a
      // threadpool, so that we avoid the possibility of running the runner_ in the
    C++
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 29.4K bytes
    - Viewed (0)