Results 1 - 10 of 78 for tpu0 (0.04 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/cluster_formation.mlir

        // CHECK-SAME: <{device = "tpu0"}>
        // CHECK: %[[B_OUTPUT:[0-9]*]] = "tf.B"(%[[A_OUTPUT]]) : (tensor<?xi32>) -> tensor<?xi32>
        %3 = "tf.B"(%2) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
    
        // CHECK: %[[C_OUTPUT:[0-9]*]] = "tf.C"(%[[A_OUTPUT]], %[[B_OUTPUT]]) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
        %4 = "tf.C"(%2, %3) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 08:59:10 UTC 2023
    - 14.9K bytes
    - Viewed (0)
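
A minimal sketch of the input pattern the cluster_formation.mlir excerpt above checks for (not taken from the file itself): a chain of ops that all carry the same `device = "tpu0"` attribute, which the cluster-formation rewrite groups under a single cluster op carrying that device, as in the `CHECK-SAME: <{device = "tpu0"}>` line. The op names `tf.A` and `tf.B` are the placeholder test ops the file already uses.

    // Two ops pinned to the same "tpu0" device; the pass wraps such a chain
    // in one cluster op that carries device = "tpu0".
    func.func @sketch(%arg0: tensor<?xi32>) -> tensor<?xi32> {
      %0 = "tf.A"(%arg0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
      %1 = "tf.B"(%0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
      func.return %1 : tensor<?xi32>
    }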
  2. tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.cc

      auto status = tensorflow::DeviceNameUtils::DeviceNameToCpuDeviceName(
          tpu0_device, cpu0_device);
      if (!status.ok())
        return op->emitError()
               << "error in converting TPU0 to CPU0. The TPU device is "
               << tpu0_device;
      return mlir::success();
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jun 10 20:10:40 UTC 2024
    - 32.8K bytes
    - Viewed (0)
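
To make the conversion in the tpu_rewrite_device_util.cc excerpt concrete, here is a hedged sketch of the device-name pair involved; the fully qualified names below are assumed examples, not strings from the file. The TPU device a cluster is placed on is mapped to the corresponding host CPU device on the same task.

    // Assumed example names, shown as `device` attributes (illustration only):
    // the TPU device a cluster is placed on, and the host CPU device it maps to.
    %t = "tf.A"() {device = "/job:worker/replica:0/task:0/device:TPU:0"} : () -> tensor<i32>
    %h = "tf.B"(%t) {device = "/job:worker/replica:0/task:0/device:CPU:0"} : (tensor<i32>) -> tensor<i32>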
  3. tensorflow/compiler/jit/xla_tpu_device.cc

    #include "xla/stream_executor/tpu/c_api_conversions.h"
    #include "xla/stream_executor/tpu/status_helper.h"
    #include "xla/stream_executor/tpu/tpu_api.h"
    #include "xla/stream_executor/tpu/tpu_node_context.h"
    #include "xla/stream_executor/tpu/tpu_platform.h"
    #include "xla/stream_executor/tpu/tpu_platform_interface.h"
    #include "xla/stream_executor/tpu/tpu_stream_interface.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 22:53:47 UTC 2024
    - 20.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/tests/tpu-dynamic-layout-pass.mlir

    // RUN: tf-opt %s -split-input-file -tf-tpu-dynamic-layout-pass | FileCheck %s
    
    // Tests that the pass can transform non-replicated execution.
    
    // CHECK: func @non_replicated(%[[ARG0:.*]]: tensor<*x!tf_type.resource> {tf.device = "/device:CPU:0"}) -> tensor<i32>
    func.func @non_replicated(%arg0: tensor<*x!tf_type.resource> {tf.device = "/device:CPU:0"}) -> tensor<i32> {
      // CHECK: %[[COMPILE:.*]]:2 = "tf_device.launch"
      // CHECK-NEXT: "tf._TPUCompileMlir"()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 08:59:10 UTC 2023
    - 29.6K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/runtime_passes.td

    limitations under the License.
    ==============================================================================*/
    
    include "mlir/Pass/PassBase.td"
    
    def TPURewritePass : Pass<"tf-tpu-rewrite", "mlir::ModuleOp"> {
      let summary = "Rewrites a `tf_device.cluster_func` on TPUs into TPU runtime operations.";
    
      let description = [{
        This pass rewrites a `tf_device.cluster_func` operation into a sequence of `tf._TPUCompileMlir`
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jan 10 18:58:57 UTC 2024
    - 10.7K bytes
    - Viewed (0)
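
The TableGen entry above registers TPURewritePass under the tf-opt argument "tf-tpu-rewrite", so a test driving it would use the same RUN/CHECK scaffolding as the .mlir files in these results. A hedged sketch follows (module body omitted; contents assumed rather than taken from the repository); the CHECK lines mirror the compile launch already visible in the tpu-dynamic-layout-pass.mlir excerpt above.

    // RUN: tf-opt %s -split-input-file -tf-tpu-rewrite | FileCheck %s

    // CHECK: "tf_device.launch"
    // CHECK: "tf._TPUCompileMlir"()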
  6. tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.td

      }];
    
      let constructor = "tensorflow::tf2xla::internal::CreateVerifyClusteringPass()";
    }
    
    def TPUClusterFormationPass : Pass<"tf-tpu-cluster-formation", "ModuleOp"> {
      let summary = "Forms clusters from operations assigned to the same TPU computation";
    
      let description = [{
        TPU computations from the frontend are composed of a `tf.TPUReplicateMetadata`
        op, a subgraph of ops (TensorFlow Dialect) each with a matching
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 02:01:13 UTC 2024
    - 19.8K bytes
    - Viewed (0)
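
Likewise, the definition above registers TPUClusterFormationPass on ModuleOp under the argument "tf-tpu-cluster-formation". As a hedged sketch (not an excerpt from any test file), its tests would be driven the same way, on input containing the `tf.TPUReplicateMetadata` op and associated subgraph that the description mentions.

    // RUN: tf-opt %s -split-input-file -tf-tpu-cluster-formation | FileCheck %s

    // Input sketch: a "tf.TPUReplicateMetadata" op plus the TensorFlow ops that
    // belong to the same TPU computation (module body omitted here).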
  7. tensorflow/compiler/mlir/tensorflow/tests/tpu-variable-runtime-reformatting.mlir

    // RUN: tf-opt %s -split-input-file -tf-tpu-variable-runtime-reformatting| FileCheck %s
    
    // Tests that the pass can correctly transform a training loop with 2 replicas.
    
    !tf_res_f32 = tensor<*x!tf_type.resource<tensor<f32>>>
    !tf_res_md_f32 = tensor<*x!tf_type.resource<tensor<3x3x1x32xf32>>> // Multi-dim f32
    
    module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
      // CHECK-LABEL: func @main
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 08:59:10 UTC 2023
    - 25.4K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/tests/tpu-merge-variables-with-execute.mlir

    // RUN: tf-opt -split-input-file -verify-diagnostics -tf-tpu-merge-variables-with-execute %s | FileCheck %s
    
    // Tests that the pass merges only variable reads/writes on the same device.
    
    // CHECK-LABEL: func @merge_same_device_variables
    // CHECK-SAME: %[[ARG_0:.*]]: tensor<*x!tf_type.resource<tensor<32xf32>>>
    // CHECK-SAME: %[[ARG_1:.*]]: tensor<*x!tf_type.resource<tensor<64xf32>>>
    // CHECK-SAME: %[[ARG_2:.*]]: tensor<*x!tf_type.resource<tensor<16xf32>>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 08:59:10 UTC 2023
    - 24.5K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/tests/resource-device-inference.mlir

    // CHECK-LABEL: func @ifregion_then
    // CHECK-SAME: (%arg0: {{.+}} {tf.device = "/TPU:0"}, %arg1: {{.+}} {tf.device = "/TPU:1"}
    func.func @ifregion_then(
      %arg0: !tf_res,
      %arg1: !tf_res) {
      tf_executor.graph {
        // CHECK: tf_executor.island
        %island = tf_executor.island {
          // CHECK-NEXT: "tf.Identity"
          // CHECK-SAME: {device = "/TPU:0"}
          %id0 = "tf.Identity"(%arg0) : (!tf_res)
            -> !tf_res
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 17 16:01:45 UTC 2022
    - 18.2K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/tests/executor_tpuv1_island_coarsening/executor_tpuv1_island_coarsening.mlir

        %outputs_0, %control_1 = tf_executor.island wraps "tf.Const"() {_xla_compile_device_type = "TPU", value = dense<2> : tensor<i32>} : () -> tensor<i32>
        %outputs_3, %control_4 = tf_executor.island wraps "tf.AddV2"(%outputs, %outputs_0) {_xla_compile_device_type = "TPU"} : (tensor<i32>, tensor<i32>) -> tensor<i32>
        tf_executor.fetch
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Aug 02 03:15:59 UTC 2022
    - 36.2K bytes
    - Viewed (0)