Results 161 - 170 of 245 for tpu0 (0.03 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/prepare_tpu_computation_for_tf_export.cc

              func.getLoc(), op.getOperandTypes(), /*dynamic_key=*/dynamic_key,
              op.getSendKeyAttr(),
              /*device_ordinal=*/rewriter.getI64IntegerAttr(0),
              rewriter.getStringAttr("TPU"));
          for (auto result :
               llvm::zip(cloned_func.getArguments(), recv_at_host->getResults())) {
            std::get<0>(result).replaceAllUsesWith(std::get<1>(result));
          }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.8K bytes
    - Viewed (0)
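    The excerpt above pairs the cloned function's block arguments with the results of the newly built receive op and redirects all uses. A minimal sketch of that rewiring pattern, assuming only the core MLIR C++ API (the pass machinery and the builder call that produces the op are omitted):

    #include "llvm/ADT/STLExtras.h"
    #include "mlir/IR/Block.h"
    #include "mlir/IR/Operation.h"

    // Redirects every use of the block arguments in `args` to the matching
    // result of `op`. Assumes the two ranges have the same length, as in the
    // pass above.
    static void RewireArgsToResults(mlir::Block::BlockArgListType args,
                                    mlir::Operation *op) {
      for (auto pair : llvm::zip(args, op->getResults())) {
        std::get<0>(pair).replaceAllUsesWith(std::get<1>(pair));
      }
    }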
  2. tensorflow/compiler/mlir/tensorflow/transforms/tpu_identity_pruning.cc

    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
    
    namespace mlir {
    namespace TFTPU {
    
    namespace {
    
    // This pass removes Identity/IdentityN ops from the TPU computation and
    // reachable functions.
    // TODO(lyandy): Remove this pass once resource op lifting is migrated to use
    // resource alias analysis and support region based control flow. Removing
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Oct 05 23:50:19 UTC 2022
    - 4.1K bytes
    - Viewed (0)
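    The pass comment above describes stripping Identity/IdentityN ops. A minimal sketch of the single-result tf.Identity case, assuming the TF dialect headers included in the excerpt (IdentityN and the traversal of reachable functions are left out):

    #include "llvm/ADT/SmallVector.h"
    #include "mlir/IR/Region.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

    // Forwards each tf.Identity result to its operand and erases the op.
    // Ops are collected first so erasing does not invalidate the walk.
    static void PruneIdentityOps(mlir::Region &region) {
      llvm::SmallVector<mlir::TF::IdentityOp, 4> identities;
      region.walk([&](mlir::TF::IdentityOp op) { identities.push_back(op); });
      for (mlir::TF::IdentityOp op : identities) {
        op->getResult(0).replaceAllUsesWith(op->getOperand(0));
        op->erase();
      }
    }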
  3. tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto

      // TODO(b/263528090): Check the condition when this feature is beneficial.
      bool enable_two_input_tensors = 11;
    
      // Supports TPU model quantization. If the target model for the quantization
      // is already converted for TPU, this flag may be helpful. Note that this
      // feature may be unstable as it is under the experimental stage.
      bool experimental_enable_tpu_model_support = 12;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 19 06:31:19 UTC 2024
    - 9.2K bytes
    - Viewed (0)
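    The two fields above are plain booleans on the quantization options message. Assuming they live on a QuantizationOptions message in the tensorflow.quantization package (both names are an assumption; only the field names appear in the excerpt), enabling TPU model support from C++ would look roughly like this:

    #include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"

    // Hypothetical usage sketch: the message type and namespace are inferred
    // from the file name; only the field names come from the excerpt above.
    tensorflow::quantization::QuantizationOptions MakeTpuQuantizationOptions() {
      tensorflow::quantization::QuantizationOptions opts;
      opts.set_experimental_enable_tpu_model_support(true);
      opts.set_enable_two_input_tensors(true);
      return opts;
    }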
  4. tensorflow/compiler/mlir/tfrt/tests/mlrt/tpu_conversions.mlir

    // RUN: tf-tfrt-opt --split-input-file -pass-pipeline='builtin.module(pre-parallel-tf-to-mlrt{use-tpu-host-allocator-for-inputs=true},tf-mlrt-parallelization{tfrt-cost-threshold=4},tf-to-mlrt)'  %s | FileCheck %s --dump-input=fail --dump-input-filter=all
    
    func.func @callee(%arg0: tensor<i32>, %arg1: tensor<i32>) -> (tensor<i32>) {
      func.return %arg0: tensor<i32>
    }
    
    // CHECK-LABEL: func @batch_function
    func.func @batch_function(%arg0: tensor<i32>) -> (tensor<i32>) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Oct 04 21:25:31 UTC 2023
    - 11K bytes
    - Viewed (0)
  5. tensorflow/compiler/jit/xla_compile_util.cc

      // We store information about the JIT-compiled XLA computation in the
      // ResourceMgr. The DeviceCompiler (which contains the DeviceCompilationCache)
      // is stored in the tfrt_global ResourceMgr for TPU and the Device ResourceMgr
      // for CPU/GPU. This is to make sure the DeviceCompiler's lifecycle is
      // maintained appropriately.
      ResourceMgr* rm = nullptr;
      if (device_type == DEVICE_TPU) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 4.6K bytes
    - Viewed (0)
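    The comment above says the DeviceCompiler (and its DeviceCompilationCache) is kept in the tfrt_global ResourceMgr for TPU and in the device's own ResourceMgr for CPU/GPU. A rough sketch of that selection; GetTfrtGlobalResourceMgr() is a placeholder name, since the real accessor is not shown in the excerpt:

    #include "tensorflow/core/common_runtime/device.h"
    #include "tensorflow/core/framework/resource_mgr.h"
    #include "tensorflow/core/framework/types.h"

    namespace tensorflow {

    // Placeholder declaration for whatever returns the process-wide
    // tfrt_global ResourceMgr; the actual accessor is not in the excerpt.
    ResourceMgr* GetTfrtGlobalResourceMgr();

    // Chooses where the DeviceCompiler lives: the tfrt_global ResourceMgr for
    // TPU, the device-local ResourceMgr for CPU/GPU.
    ResourceMgr* SelectCompilerResourceMgr(const DeviceType& device_type,
                                           Device* device) {
      if (device_type == DEVICE_TPU) {
        return GetTfrtGlobalResourceMgr();
      }
      return device->resource_manager();
    }

    }  // namespace tensorflow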
  6. tensorflow/compiler/mlir/tensorflow/ir/tf_side_effects.h

    };
    
    struct DatasetIterator : ::mlir::SideEffects::Resource::Base<DatasetIterator> {
      StringRef getName() final { return "DatasetIterator"; }
    };
    
    // Special resource type to track TPU Embedding specific ops, which must execute
    // but do not have side effects with one another or with resource variable ops.
    struct TPUEmbedding : ::mlir::SideEffects::Resource::Base<TPUEmbedding> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Dec 26 18:45:40 UTC 2023
    - 4.7K bytes
    - Viewed (0)
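    Each resource kind in this header is a small CRTP struct deriving from ::mlir::SideEffects::Resource::Base and reporting a unique name. A new kind would follow the same shape; HostQueue below is a made-up name used purely to illustrate the pattern:

    // Illustrative only: a side-effect resource declared in the same CRTP
    // style as DatasetIterator and TPUEmbedding above.
    struct HostQueue : ::mlir::SideEffects::Resource::Base<HostQueue> {
      StringRef getName() final { return "HostQueue"; }
    };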
  7. .github/bot_config.yml

             * It has an added advantage since you can easily switch to different hardware accelerators (cpu, gpu, tpu) as per the task.
             * All you need is a good internet connection and you are all set.
          * Try to build TF from sources by changing CPU optimization flags.
       
       *Please let us know if this helps.*
       
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jun 03 04:55:57 UTC 2024
    - 4K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/stablehlo/python/BUILD

            "//tensorflow/compiler/mlir/quantization/...",
            "//tensorflow/compiler/mlir/tf2xla/transforms/...",
            "//tensorflow/lite/...",
            "//third_party/cloud_tpu/inference_converter/...",  # TPU Inference Converter V1
        ] + internal_visibility_allowlist(),
    )
    
    package(
        # copybara:uncomment default_applicable_licenses = ["@stablehlo//:license"],
        default_visibility = [
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 20:18:36 UTC 2024
    - 7K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util_test.cc

    #include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
    #include "tensorflow/core/lib/core/status_test_util.h"
    #include "tensorflow/core/platform/test.h"
    #include "tensorflow/core/protobuf/tpu/topology.pb.h"
    #include "tensorflow/core/util/device_name_utils.h"
    
    // #include <gmock/gmock.h>
    // #include <gtest/gtest.h>
    
    namespace tensorflow {
    namespace {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Apr 26 09:37:10 UTC 2024
    - 4.3K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/transforms/rewrite_tpu_embedding_ops.cc

      RecvTPUEmbeddingActivationsOp recv_op;
      if (failed(GetOp(region, &recv_op))) return failure();
    
      SendTPUEmbeddingGradientsOp send_op;
      if (failed(GetOp(region, &send_op))) return failure();
    
      // No TPU embedding ops.
      if (!recv_op && !send_op) return success();
    
      Location loc = recv_op ? recv_op.getLoc() : send_op.getLoc();
      StringRef config = recv_op ? recv_op.getConfig() : send_op.getConfig();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 15 22:55:42 UTC 2024
    - 4.5K bytes
    - Viewed (0)
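    The excerpt leans on a GetOp helper that yields at most one op of the requested type from the region (leaving the default-constructed recv_op/send_op null when none is present). A plausible shape for such a helper, assuming it fails when several matches are found; the real implementation is not shown:

    #include "mlir/IR/Region.h"
    #include "mlir/Support/LogicalResult.h"

    // Sketch of a GetOp-style helper: leaves *result untouched (null) when no
    // op of type OpT exists in `region`, fails if more than one is found.
    // The caller default-initializes *result, as in the excerpt above.
    template <typename OpT>
    mlir::LogicalResult GetOp(mlir::Region *region, OpT *result) {
      for (OpT op : region->getOps<OpT>()) {
        if (*result) return mlir::failure();  // more than one match
        *result = op;
      }
      return mlir::success();
    }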