Results 1 - 10 of 71 for tpu0 (0.04 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/cluster_formation.mlir

        // CHECK-SAME: <{device = "tpu0"}>
        // CHECK: %[[B_OUTPUT:[0-9]*]] = "tf.B"(%[[A_OUTPUT]]) : (tensor<?xi32>) -> tensor<?xi32>
        %3 = "tf.B"(%2) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
    
        // CHECK: %[[C_OUTPUT:[0-9]*]] = "tf.C"(%[[A_OUTPUT]], %[[B_OUTPUT]]) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
        %4 = "tf.C"(%2, %3) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 08:59:10 UTC 2023
    - 14.9K bytes
    - Viewed (0)
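
    The test above exercises TPU cluster formation: ops that carry the same
    `device = "tpu0"` attribute are gathered into a single cluster, in program
    order. A minimal standalone C++ sketch of that grouping idea, assuming a
    flat list of (op, device) pairs rather than real MLIR operations (the
    actual pass also honors `tf.TPUReplicateMetadata` and data dependencies):

        #include <iostream>
        #include <string>
        #include <utility>
        #include <vector>

        // Simplified model of cluster formation: consecutive ops with the same
        // device attribute are collected into one cluster. The real pass works
        // on MLIR operations and handles replication metadata and reordering;
        // this only illustrates the grouping step.
        struct Cluster {
          std::string device;
          std::vector<std::string> ops;
        };

        std::vector<Cluster> FormClusters(
            const std::vector<std::pair<std::string, std::string>>& ops) {
          std::vector<Cluster> clusters;
          for (const auto& [op, device] : ops) {
            if (clusters.empty() || clusters.back().device != device)
              clusters.push_back({device, {}});
            clusters.back().ops.push_back(op);
          }
          return clusters;
        }

        int main() {
          // Mirrors the checked ops above: tf.B and tf.C follow tf.A on "tpu0".
          for (const auto& c : FormClusters(
                   {{"tf.A", "tpu0"}, {"tf.B", "tpu0"}, {"tf.C", "tpu0"}})) {
            std::cout << "cluster on " << c.device << ":";
            for (const auto& op : c.ops) std::cout << " " << op;
            std::cout << "\n";
          }
          return 0;
        }
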
  2. tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.cc

      auto status = tensorflow::DeviceNameUtils::DeviceNameToCpuDeviceName(
          tpu0_device, cpu0_device);
      if (!status.ok())
        return op->emitError()
               << "error in converting TPU0 to CPU0. The TPU device is "
               << tpu0_device;
      return mlir::success();
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jun 10 20:10:40 UTC 2024
    - 32.8K bytes
    - Viewed (0)
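
    The snippet above maps a TPU device name to its paired host CPU device via
    `tensorflow::DeviceNameUtils::DeviceNameToCpuDeviceName`. A self-contained
    sketch of the string mapping that call performs, assuming fully qualified
    names of the usual `/job:.../replica:.../task:.../device:TYPE:N` form (the
    real utility lives in TensorFlow's device_name_utils and does proper
    parsing and error reporting):

        #include <iostream>
        #include <optional>
        #include <string>

        // Illustrative only: rewrite ".../device:TPU:<n>" to ".../device:CPU:0",
        // i.e. the host CPU on the same job/replica/task as the TPU device.
        // Returns std::nullopt when the name does not look like a TPU device.
        std::optional<std::string> TpuToCpuDeviceName(const std::string& tpu_device) {
          const std::string kTpuPrefix = "/device:TPU:";
          const auto pos = tpu_device.rfind(kTpuPrefix);
          if (pos == std::string::npos) return std::nullopt;
          return tpu_device.substr(0, pos) + "/device:CPU:0";
        }

        int main() {
          const std::string tpu0 = "/job:worker/replica:0/task:0/device:TPU:0";
          if (const auto cpu0 = TpuToCpuDeviceName(tpu0)) {
            std::cout << tpu0 << " -> " << *cpu0 << "\n";
          } else {
            std::cout << "error in converting TPU0 to CPU0\n";
          }
          return 0;
        }
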
  3. tensorflow/compiler/jit/xla_tpu_device.cc

    #include "xla/stream_executor/tpu/c_api_conversions.h"
    #include "xla/stream_executor/tpu/status_helper.h"
    #include "xla/stream_executor/tpu/tpu_api.h"
    #include "xla/stream_executor/tpu/tpu_node_context.h"
    #include "xla/stream_executor/tpu/tpu_platform.h"
    #include "xla/stream_executor/tpu/tpu_platform_interface.h"
    #include "xla/stream_executor/tpu/tpu_stream_interface.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 22:53:47 UTC 2024
    - 20.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/tests/tpu-dynamic-layout-pass.mlir

    // RUN: tf-opt %s -split-input-file -tf-tpu-dynamic-layout-pass | FileCheck %s
    
    // Tests that the pass can transform non-replicated execution.
    
    // CHECK: func @non_replicated(%[[ARG0:.*]]: tensor<*x!tf_type.resource> {tf.device = "/device:CPU:0"}) -> tensor<i32>
    func.func @non_replicated(%arg0: tensor<*x!tf_type.resource> {tf.device = "/device:CPU:0"}) -> tensor<i32> {
      // CHECK: %[[COMPILE:.*]]:2 = "tf_device.launch"
      // CHECK-NEXT: "tf._TPUCompileMlir"()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 08:59:10 UTC 2023
    - 29.6K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/runtime_passes.td

    limitations under the License.
    ==============================================================================*/
    
    include "mlir/Pass/PassBase.td"
    
    def TPURewritePass : Pass<"tf-tpu-rewrite", "mlir::ModuleOp"> {
      let summary = "Rewrites a `tf_device.cluster_func` on TPUs into TPU runtime operations.";
    
      let description = [{
        This pass rewrites a `tf_device.cluster_func` operation into a sequence of `tf._TPUCompileMlir`
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jan 10 18:58:57 UTC 2024
    - 10.7K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.td

      }];
    
      let constructor = "tensorflow::tf2xla::internal::CreateVerifyClusteringPass()";
    }
    
    def TPUClusterFormationPass : Pass<"tf-tpu-cluster-formation", "ModuleOp"> {
      let summary = "Forms clusters from operations assigned to the same TPU computation";
    
      let description = [{
        TPU computations from the frontend are composed of a `tf.TPUReplicateMetadata`
        op, a subgraph of ops (TensorFlow Dialect) each with a matching
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 02:01:13 UTC 2024
    - 19.8K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/tests/tpu-variable-runtime-reformatting.mlir

    // RUN: tf-opt %s -split-input-file -tf-tpu-variable-runtime-reformatting| FileCheck %s
    
    // Tests that the pass can correctly transform a training loop with 2 replicas.
    
    !tf_res_f32 = tensor<*x!tf_type.resource<tensor<f32>>>
    !tf_res_md_f32 = tensor<*x!tf_type.resource<tensor<3x3x1x32xf32>>> // Multi-dim f32
    
    module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
      // CHECK-LABEL: func @main
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 08:59:10 UTC 2023
    - 25.4K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/tests/tpu-merge-variables-with-execute.mlir

    // RUN: tf-opt -split-input-file -verify-diagnostics -tf-tpu-merge-variables-with-execute %s | FileCheck %s
    
    // Tests that the pass merges only variable reads/writes on the same device.
    
    // CHECK-LABEL: func @merge_same_device_variables
    // CHECK-SAME: %[[ARG_0:.*]]: tensor<*x!tf_type.resource<tensor<32xf32>>>
    // CHECK-SAME: %[[ARG_1:.*]]: tensor<*x!tf_type.resource<tensor<64xf32>>>
    // CHECK-SAME: %[[ARG_2:.*]]: tensor<*x!tf_type.resource<tensor<16xf32>>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 08:59:10 UTC 2023
    - 24.5K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.cc

    namespace tensorflow {
    namespace tf2xla {
    namespace v1 {
    
    using ::tensorflow::tpu::FunctionToHloArgs;
    using ::tensorflow::tpu::GuaranteedConsts;
    using ::tensorflow::tpu::MlirToHloArgs;
    using ::tensorflow::tpu::ShardingAndIndex;
    
    auto* phase2_bridge_compilation_status =
        tensorflow::monitoring::Counter<1>::New(
            "/tensorflow/core/tf2xla/api/v1/"
            "phase2_compilation_status", /*metric_name*/
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 22:19:26 UTC 2024
    - 14K bytes
    - Viewed (0)
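
    The excerpt above registers a streamz-style counter tracking phase-2 bridge
    compilation status. A hedged sketch of how such a counter is typically
    defined and bumped, assuming the standard `tensorflow::monitoring::Counter`
    API; the description and label name below are illustrative assumptions, not
    taken from compile_tf_graph.cc beyond what the snippet shows:

        #include <string>

        #include "tensorflow/core/lib/monitoring/counter.h"

        namespace {

        // One-label counter, mirroring the Counter<1>::New call in the snippet.
        // Description and label name ("status") are assumed for illustration.
        auto* compilation_status_counter = tensorflow::monitoring::Counter<1>::New(
            "/tensorflow/core/tf2xla/api/v1/phase2_compilation_status",
            "Counts phase-2 bridge compilations, partitioned by status.",
            "status");

        }  // namespace

        // Bumps the cell for a given status label, e.g. after a compilation attempt.
        void RecordCompilationStatus(const std::string& status) {
          compilation_status_counter->GetCell(status)->IncrementBy(1);
        }
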
  10. tensorflow/compiler/mlir/tensorflow/tests/replicate_to_island.mlir

        tf_executor.fetch
      }
      func.return
    }
    // CHECK: "tf.opA"
    // device = "/TPU:0"
    // CHECK: _parallel_execution_ids = "r0:0"
    // CHECK: "tf.opA"
    // device = "/TPU:0"
    // CHECK: _parallel_execution_ids = "r0:1"
    // CHECK: "tf.opA"
    // device = "/TPU:1"
    // CHECK: _parallel_execution_ids = "r1:0"
    // CHECK: "tf.opA"
    // device = "/TPU:1"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 08:59:10 UTC 2023
    - 15.1K bytes
    - Viewed (0)