Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 8 of 8 for TPUPartitionedOutputV2 (0.25 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/tpu_resource_partitioning.mlir

      %2 = "tf_device.cluster_func"(%1) {func = @computation, use_spmd_for_xla_partitioning = true} : (tensor<i32>) -> tensor<i32>
      // CHECK:      [[OUTPUT:%.+]]:2 = "tf.TPUPartitionedOutputV2"([[COMPUTATION]])
      // CHECK-SAME: _XlaSharding = ""
      // CHECK-SAME: partition_dims = []
      // CHECK-DAG:  "tf.AssignVariableOp"([[ARG0]], [[OUTPUT]]#0)
      // CHECK-DAG:  "tf.AssignVariableOp"([[ARG1]], [[OUTPUT]]#1)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jan 23 23:53:20 UTC 2024
    - 15.7K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/tests/tpu_cluster_formation.mlir

      %4 = "tf.Identity"(%3#0) : (tensor<?xi32>) -> (tensor<?xi32>)
      %5:2 = "tf.TPUPartitionedOutputV2"(%4) {_XlaSharding = "", partition_dims = []} : (tensor<?xi32>) -> (tensor<?xi32>, tensor<?xi32>)
      %6 = "tf.Identity"(%3#1) : (tensor<?xi32>) -> (tensor<?xi32>)
      %7:2 = "tf.TPUPartitionedOutputV2"(%6) {_XlaSharding = "", partition_dims = []} : (tensor<?xi32>) -> (tensor<?xi32>, tensor<?xi32>)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 22:03:30 UTC 2024
    - 53.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/tests/tpu_sharding_identification.mlir

      %1:2 = "tf_device.cluster_func"(%0, %arg1) {func = @cluster_func, use_spmd_for_xla_partitioning = true, num_cores_per_replica = 1 : i64} : (tensor<*xi32>, tensor<*xi32>) -> (tensor<*xi32>, tensor<*xi32>)
      %2 = "tf.TPUPartitionedOutputV2"(%1#1) {_XlaSharding = "\04\05\06", partition_dims = []} : (tensor<*xi32>) -> tensor<*xi32>
      func.return %1#0, %2 : tensor<*xi32>, tensor<*xi32>
    }
    
    // CHECK-LABEL: func @cluster_func
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Feb 20 19:07:52 UTC 2024
    - 47.5K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_rewrite_pass.cc

          // Check that user has no outputs that are TPUPartitionedOutputV2
          for (auto result : user->getResults()) {
            for (Operation* user : llvm::make_early_inc_range(result.getUsers())) {
              if (llvm::isa<TF::TPUPartitionedOutputV2Op>(user)) {
                user->emitError() << "Input of TPUPartitionedOutputV2 must "
                                  << "be in tpu computation.";
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 21:25:12 UTC 2024
    - 29.7K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc

        const auto output_sharding_type = output_sharding.type();
    
        // If output is demultiplexed using the `tf.TPUPartitionedOutputV2` op, only
        // replicated sharding is supported where i-th output of
        // `tf.TPUPartitionedOutputV2` op maps to the output of i-th logical device.
        // Also `tf.TPUPartitionedOutputV2` op must be a unique user of
        // TPU Cluster (`tf_device.old_parallel_execute`) output.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 22 21:28:13 UTC 2024
    - 34K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/tests/tpu_partitioned_op_conversion.mlir

    func.func @partitioned_output_3d(%arg: tensor<!tf_type.resource<tensor<16x16x16xf32>>>) -> tensor<!tf_type.resource<tensor<16x8x16xf32>>> {
      // CHECK: [[PO:%.*]] = "tf.TPUPartitionedOutputV2"([[ARG]])
      // CHECK-SAME: _XlaSharding = "123"
      // CHECK-SAME: partition_dims = [1, 2, 1]
      "tf.TPUReplicateMetadata"() {num_cores_per_replica = 2 : i64, num_replicas = 2 : i64} : () -> ()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 20 17:43:51 UTC 2023
    - 8.3K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/tests/tpu_rewrite.mlir

        // CHECK-NOT: tf.TPUPartitionedOutputV2
        %partitioned_output:2 = "tf.TPUPartitionedOutputV2"(%computation) {N = 2 : i64, partition_dims = []} : (tensor<i32>) -> (tensor<i32>, tensor<i32>)
        // CHECK: "tf.AssignVariableOp"(%arg0, %[[PARALLEL_EXECUTE_OUTPUT]]#0)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 22:03:30 UTC 2024
    - 172.9K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td

      );
    
      let results = (outs
        TF_Int64Tensor:$device_ordinal
      );
    }
    
    // We must manually define TPUPartitionedInput, TPUPartitionedInputV2,
    // TPUPartitionedOutput, and TPUPartitionedOutputV2 since they have an
    // optional attribute, _XlaSharding, unlike the TensorFlow definition.
    def TF_TPUPartitionedInputOp : TF_Op<"TPUPartitionedInput", [Pure]> {
      let summary = [{
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 04:08:35 UTC 2024
    - 90.5K bytes
    - Viewed (0)
Back to top