- Sort Score
- Results 10 results
- Languages All
Results 11 - 20 of 235 for tpu0 (0.23 sec)
-
tensorflow/compiler/mlir/tfrt/ir/mlrt/tf_mlrt_tpu_ops.td
); let assemblyFormat = "attr-dict"; } def CompileAndExecuteOp : TensorflowMlrtTpu_Op<"compile_and_execute"> { let summary = "tpu compile and execute operation"; let description = [{ tf_mlrt_tpu.compile_and_execute compiles a mlir tpu program and executes the compiled tpu program. $mlir_module is a serialized MLIR module with a `main` function that contains target computation.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 04 21:25:31 UTC 2023 - 3.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/end-to-end-tpu-reshard-variables.mlir
// RUN: tf-opt %s -tf-replicated-clustering-bridge-v2 -tfrt-lower-cluster-to-runtime-ops-tpu 2>&1 | FileCheck %s // TPUReshardVariables should be inserted even when While functions' shapes are // different than While operand shapes. Test the whole tf-tpu-bridge because // correct insertion of TPUReshardVariables depends on multiple passes including // TPUVariableRuntimeReformatting, ShapeInference, WhileRegion canonicalization, // and TPUMergeVariablesWithExecute.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 21:23:47 UTC 2024 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu-annotate-dynamic-shape-inputs.mlir
// RUN: tf-opt -split-input-file -verify-diagnostics -tf-tpu-annotate-dynamic-shape-inputs %s | FileCheck %s // Test that annotate the inputs of the cluster func to be dynamic shaped. module attributes {tf.devices = ["/job:worker/replica:0/task:0/device:CPU:0", "/job:worker/replica:0/task:0/device:TPU_SYSTEM:0", "/job:worker/replica:0/task:0/device:TPU:0"]} { func.func @main( %arg0: tensor<2048xi64> {tf.device = "/job:localhost/replica:0/task:0/device:CPU:0"},
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Aug 14 15:35:49 UTC 2023 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu-dynamic-layout-pass.mlir
// RUN: tf-opt %s -split-input-file -tf-tpu-dynamic-layout-pass | FileCheck %s // Tests that the pass can transform non-replicated execution. // CHECK: func @non_replicated(%[[ARG0:.*]]: tensor<*x!tf_type.resource> {tf.device = "/device:CPU:0"}) -> tensor<i32> func.func @non_replicated(%arg0: tensor<*x!tf_type.resource> {tf.device = "/device:CPU:0"}) -> tensor<i32> { // CHECK: %[[COMPILE:.*]]:2 = "tf_device.launch" // CHECK-NEXT: "tf._TPUCompileMlir"()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:59:10 UTC 2023 - 29.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/ir/mlrt/tf_ops.td
}]; } def TFTPUCompileAndExecuteOp : TensorflowMlrt_Op<"tf_tpu_compile_and_execute", [TF_MustExecute]> { let summary = "tpu compile and execute operation"; let description = [{ tf_mlrt_tpu.compile_and_execute compiles a mlir tpu program and executes the compiled tpu program. $mlir_module is a serialized MLIR module with a `main` function that contains target computation.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 22 21:35:32 UTC 2024 - 6.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/runtime_passes.h
#include "mlir/IR/BuiltinOps.h" // from @llvm-project #include "mlir/Pass/Pass.h" // from @llvm-project namespace mlir { namespace TFTPU { // Creates a pass that rewrites `tf_device.launch_func` on TPUs into TPU runtime // ops. std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>> CreateTPURewritePass( llvm::StringRef module_name = llvm::StringRef()); // Creates a pass that adds ops which perform formatting on variables at
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jan 10 18:58:57 UTC 2024 - 2.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/runtime_passes.td
limitations under the License. ==============================================================================*/ include "mlir/Pass/PassBase.td" def TPURewritePass : Pass<"tf-tpu-rewrite", "mlir::ModuleOp"> { let summary = "Rewrites a `tf_device.cluster_func` on TPUs into TPU runtime operations."; let description = [{ This pass rewrites a `tf_device.cluster_func` operation into a sequence of `tf._TPUCompileMlir`
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jan 10 18:58:57 UTC 2024 - 10.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_cluster_cleanup_attributes.cc
// device. Device attribute is used to infer the appropriate sharding // within TPUs for this op. // TODO(b/183598857): Use explicit sharding ops from the front-end. // For example, dequeue ops generated by // tensorflow/python/tpu/tpu_feed.py if (!tensorflow::IsTPUReplicatedCore(attr.getValue()) && !isa<tf_device::LaunchOp>(op)) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 05 23:50:19 UTC 2022 - 3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.td
}]; let constructor = "tensorflow::tf2xla::internal::CreateVerifyClusteringPass()"; } def TPUClusterFormationPass : Pass<"tf-tpu-cluster-formation", "ModuleOp"> { let summary = "Forms clusters from operations assigned to the same TPU computation"; let description = [{ TPU computations from the frontend are composed of a `tf.TPUReplicateMetadata` op, a subgraph of ops (TensorFlow Dialect) each with a matching
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 02:01:13 UTC 2024 - 19.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu-variable-runtime-reformatting.mlir
// RUN: tf-opt %s -split-input-file -tf-tpu-variable-runtime-reformatting| FileCheck %s // Tests that the pass can correctly transform a training loop with 2 replicas. !tf_res_f32 = tensor<*x!tf_type.resource<tensor<f32>>> !tf_res_md_f32 = tensor<*x!tf_type.resource<tensor<3x3x1x32xf32>>> // Multi-dim f32 module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} { // CHECK-LABEL: func @main
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:59:10 UTC 2023 - 25.4K bytes - Viewed (0)