Results 1 - 10 of 14 for tpu (0.02 sec)

  1. ci/official/containers/linux_arm64/devel.usertools/aarch64.bazelrc

    # bazel test invocation as normal.
    test:nonpip_filters --test_tag_filters=-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_aarch64,-no_oss_py38,-no_oss_py39,-no_oss_py310
    test:nonpip_filters --build_tag_filters=-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_aarch64,-no_oss_py38,-no_oss_py39,-no_oss_py310
    - Last Modified: Fri Jul 12 20:16:57 UTC 2024
    - 5.7K bytes
  2. ci/official/containers/linux_arm64/devel.usertools/aarch64_clang.bazelrc

    # bazel test invocation as normal.
    test:nonpip_filters --test_tag_filters=-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_aarch64,-no_oss_py38,-no_oss_py39,-no_oss_py310
    test:nonpip_filters --build_tag_filters=-no_oss,-oss_serial,-gpu,-tpu,-benchmark-test,-v1only,-no_aarch64,-no_oss_py38,-no_oss_py39,-no_oss_py310
    - Last Modified: Fri Jul 12 20:16:57 UTC 2024
    - 6.2K bytes
  3. ci/official/envs/linux_x86_tpu_build

    source ci/official/envs/linux_x86_build
    TFCI_BAZEL_COMMON_ARGS="--repo_env=HERMETIC_PYTHON_VERSION=$TFCI_PYTHON_VERSION --config release_cpu_linux --config=tpu"
    TFCI_BAZEL_TARGET_SELECTING_CONFIG_PREFIX=linux_tpu
    TFCI_BUILD_PIP_PACKAGE_ARGS="--repo_env=WHEEL_NAME=tensorflow_tpu"
    TFCI_LIB_SUFFIX="-tpu-linux-x86_64"
    TFCI_WHL_BAZEL_TEST_ENABLE=0
    TFCI_WHL_IMPORT_TEST_ENABLE=0
    TFCI_WHL_SIZE_LIMIT=580M
    - Last Modified: Tue Nov 05 08:00:04 UTC 2024
    - 1.2K bytes
  4. .bazelrc

    #     dbg:              Build with debug info
    #
    # TF version options;
    #     v2: Build TF v2
    #
    # Feature and Third party library support options:
    #     xla:          Build TF with XLA
    #     tpu:          Build TF with TPU support
    #     cuda:         Build with CUDA support.
    #     cuda_clang    Build with CUDA Clang support.
    #     rocm:         Build with AMD GPU support (rocm)
    #     mkl:          Enable full mkl support.
    - Last Modified: Mon Oct 28 22:02:31 UTC 2024
    - 51.3K bytes
  5. ci/official/envs/linux_x86_tpu

    source ci/official/envs/linux_x86
    TFCI_BAZEL_COMMON_ARGS="--repo_env=HERMETIC_PYTHON_VERSION=$TFCI_PYTHON_VERSION --config release_cpu_linux --config=tpu"
    TFCI_BAZEL_TARGET_SELECTING_CONFIG_PREFIX=linux_tpu
    TFCI_BUILD_PIP_PACKAGE_ARGS="--repo_env=WHEEL_NAME=tensorflow_tpu"
    TFCI_LIB_SUFFIX="-tpu-linux-x86_64"
    TFCI_WHL_BAZEL_TEST_ENABLE=0
    TFCI_WHL_IMPORT_TEST_ENABLE=0
    TFCI_WHL_SIZE_LIMIT=580M
    - Last Modified: Mon Oct 14 23:45:36 UTC 2024
    - 1.2K bytes
  6. .github/bot_config.yml

             * It has an added advantage since you can easily switch to different hardware accelerators (cpu, gpu, tpu) as per the task.
             * All you need is a good internet connection and you are all set.
          * Try to build TF from sources by changing CPU optimization flags.
       
       *Please let us know if this helps.*
       
    - Last Modified: Mon Jul 15 05:00:54 UTC 2024
    - 4K bytes
  7. tensorflow/BUILD

    config_setting(
        name = "disable_mlir_bridge",
        define_values = {"enable_mlir_bridge": "false"},
        visibility = ["//visibility:public"],
    )
    
    # This flag enables experimental TPU support
    bool_flag(
        name = "enable_tpu_support",
        build_setting_default = False,
    )
    
    config_setting(
        name = "with_tpu_support_define",
        define_values = {"with_tpu_support": "true"},
    - Last Modified: Wed Oct 16 05:28:35 UTC 2024
    - 53.5K bytes
  8. tensorflow/c/eager/parallel_device/parallel_device_test.cc

          TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
      ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
    
      // Skip the test if no TPU is available.
      std::unique_ptr<TF_DeviceList, decltype(&TF_DeleteDeviceList)> devices(
          TFE_ContextListDevices(context.get(), status.get()), TF_DeleteDeviceList);
    - Last Modified: Tue Aug 06 23:56:17 UTC 2024
    - 29.4K bytes
  9. tensorflow/c/eager/parallel_device/parallel_device.cc

        const TFE_OpAttrs* attributes, int expected_max_outputs,
        TF_Status* status) {
      absl::optional<std::vector<MaybeParallelTensorOwned>> result;
      // TODO(allenl): We should remove "TPU" from these op names at the very least,
      // or consider other ways of packing/unpacking parallel tensors.
      if (operation_name == std::string("TPUReplicatedInput")) {
    - Last Modified: Mon Oct 21 04:14:14 UTC 2024
    - 18.3K bytes
  10. RELEASE.md

    *   `tf.tpu.experimental.embedding`:
    
        *   `tf.tpu.experimental.embedding.FeatureConfig` now takes an additional
            argument `output_shape` which can specify the shape of the output
            activation for the feature.
        *   `tf.tpu.experimental.embedding.TPUEmbedding` now has the same behavior
            as `tf.tpu.experimental.embedding.serving_embedding_lookup` which can
    - Last Modified: Tue Oct 22 14:33:53 UTC 2024
    - 735.3K bytes
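
The parallel_device_test.cc excerpt in result 8 lists the eager context's devices so the test can be skipped when no TPU is present. The sketch below is an assumed Python-level analogue of that guard, not code from the file: it uses tf.config.list_logical_devices and unittest.SkipTest in place of the TF C API calls (TFE_ContextListDevices, TF_DeleteDeviceList) shown in the excerpt.

    # Sketch only: a Python-level analogue of the "skip if no TPU" guard in
    # result 8. The real test uses the TF C API; this version is an assumption
    # about how the same check is commonly written against the Python API.
    import unittest

    import tensorflow as tf

    def require_tpu():
        # list_logical_devices("TPU") returns an empty list when no TPU device
        # is visible in the current context, mirroring the device-list scan in
        # parallel_device_test.cc.
        if not tf.config.list_logical_devices("TPU"):
            raise unittest.SkipTest("No TPU available; skipping TPU-only test.")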
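
The RELEASE.md excerpt in result 10 notes that tf.tpu.experimental.embedding.FeatureConfig accepts an output_shape argument describing the shape of the feature's output activation. A minimal sketch of passing that argument is below; the table size, embedding dimension, names, and shape values are illustrative assumptions, not values taken from the release notes.

    # Sketch only: illustrative values, assuming the TF 2.x TPU embedding API.
    import tensorflow as tf

    # An embedding table with an assumed vocabulary size and dimension.
    table = tf.tpu.experimental.embedding.TableConfig(
        vocabulary_size=1000,
        dim=8,
        name="video_table",
    )

    # output_shape declares the shape of the feature's output activation
    # instead of leaving it to be inferred from the input batch.
    feature = tf.tpu.experimental.embedding.FeatureConfig(
        table=table,
        output_shape=[16, 4],  # assumed per-replica output shape
        name="watched",
    )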