- Sort: Score
- Results per page: 10
- Languages: All
Results 101 - 110 of 135 for "tpu" (0.46 sec)
-
tensorflow/compiler/mlir/tensorflow/tests/rewrite_tpu_embedding_ops.mlir
// RUN: tf-opt -tf-rewrite-tpu-embedding-ops %s | FileCheck %s // CHECK-LABEL: func @recv_tpu_embedding_activations func.func @recv_tpu_embedding_activations() -> (tensor<512x256xf32>) { // CHECK: %[[DATA:.*]] = "tf.XlaRecvTPUEmbeddingDeduplicationData"() <{config = {{.*}}}> : () -> tensor<!tf_type.variant> // CHECK: %[[RESULT:.*]] = "tf.XlaRecvTPUEmbeddingActivations"(%[[DATA]]) <{config = {{.*}}}> : (tensor<!tf_type.variant>) -> tensor<512x256xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 4.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf.h
// is_supported_by_replicated_brige - If the graph targets the replicated // bridge. Set it to true for replicated/partitioned graphs. e.g. replicated // and single-core TPU graphs. Set this to false if the graph is not // replicated, e.g. CPU/GPU graphs. is_in_fallback_enabled_mode - Whether this // was called with fallback to the non-MLIR Bridge. This is just for logging
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 16 23:11:04 UTC 2024 - 2.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/function/function.cc
// for training. if (absl::StrContains(pass_options.default_device, "CPU")) { pass_options.skip_fold_transpose_in_ops = true; } pass_options.enable_optimizer = options.enable_optimizer; // Use TFRT TPU OpKernel for training. pass_options.target_tpurt = false; pass_options.tpu_use_core_selector = options.tpu_use_core_selector; pass_options.tpu_use_bundled_transfer = options.tpu_use_bundled_transfer;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 08:13:15 UTC 2024 - 3.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_identity_pruning.mlir
// RUN: tf-opt %s -tf-tpu-identity-pruning | FileCheck %s --dump-input=always // Tests Identity op in cluster is pruned away. // CHECK-LABEL: func @testIdentity // CHECK-SAME: ([[ARG0:%.*]]: tensor<i32>) func.func @testIdentity(%arg0: tensor<i32>) { // CHECK-NOT: "tf.Identity" // CHECK: "tf_device.cluster" // CHECK-NEXT: tf_device.return [[ARG0]] %0 = "tf_device.cluster"() ({ %1 = "tf.Identity"(%arg0) : (tensor<i32>) -> tensor<i32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 28 12:06:33 UTC 2022 - 3.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_inline_tpu_island.cc
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h" #include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h" #define DEBUG_TYPE "tf-executor-tpu-v1-island-inlining" namespace mlir { namespace tf_executor { namespace { constexpr llvm::StringRef kNestedModule = "_tpu_v1_compat_outlined"; #define GEN_PASS_DEF_EXECUTORTPUV1ISLANDINLININGPASS
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 19 08:06:04 UTC 2023 - 4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/canonicalize_compile_and_replicate_attributes.cc
// This transformation pass converts existing compilation and replication // attributes into unified attributes. For example, A _tpu_replicate=X // should be replaced with _xla_compile_device_type=TPU and // _replication_info=X attributes by the conversion. An _XlaMustCompile=true // should be replaced with _xla_compile_device_type with the value of device // attribute. #include "llvm/ADT/StringRef.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 05 23:50:19 UTC 2022 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_identity_pruning.cc
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" namespace mlir { namespace TFTPU { namespace { // This pass removes Identity/IdentityN ops from the TPU computation and // reachable functions. // TODO(lyandy): Remove this pass once resource op lifting is migrated to use // resource alias analysis and support region based control flow. Removing
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 05 23:50:19 UTC 2022 - 4.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto
// TODO(b/263528090): Check the condition when this feature is beneficial. bool enable_two_input_tensors = 11; // Supports TPU model quantization. If the target model for the quantization // is already converted for TPU, this flag may be helpful. Note that this // feature may be unstable as it is under the experimental stage. bool experimental_enable_tpu_model_support = 12;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 19 06:31:19 UTC 2024 - 9.2K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compile_util.cc
// We store information about the JIT-compiled XLA computation in the // ResourceMgr. The DeviceCompiler (which contains the DeviceCompilationCache) // is stored in the tfrt_global ResourceMgr for TPU and the Device ResourceMgr // for CPU/GPU. This is to make sure the DeviceCompiler's lifecycle is // maintained appropriately. ResourceMgr* rm = nullptr; if (device_type == DEVICE_TPU) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 4.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_side_effects.h
}; struct DatasetIterator : ::mlir::SideEffects::Resource::Base<DatasetIterator> { StringRef getName() final { return "DatasetIterator"; } }; // Special resource type to track TPU Embedding specific ops, which must execute // but do not have side effects with one another or with resource variable ops. struct TPUEmbedding : ::mlir::SideEffects::Resource::Base<TPUEmbedding> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 26 18:45:40 UTC 2023 - 4.7K bytes - Viewed (0)