Results 1 - 8 of 8 for TPUPartitionedInputV2 (0.25 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/tpu_resource_partitioning.mlir

      // CHECK:      [[INPUT0:%.+]] = "tf.TPUPartitionedInputV2"([[READ0]], [[READ1]])
      // CHECK-DAG:  [[READ2:%.+]] = "tf.ReadVariableOp"([[ARG0]])
      // CHECK-DAG:  [[READ3:%.+]] = "tf.ReadVariableOp"([[ARG1]])
      // CHECK:      [[INPUT1:%.+]] = "tf.TPUPartitionedInputV2"([[READ2]], [[READ3]])
    - Last Modified: Tue Jan 23 23:53:20 UTC 2024
    - 15.7K bytes
  2. tensorflow/compiler/mlir/tensorflow/tests/tpu_reorder_replicate_and_partitioned_inputs.mlir

      // CHECK: [[PI_0:%.*]] = "tf.TPUPartitionedInputV2"([[ARG0]], [[ARG1]])
      %pi_0 = "tf.TPUPartitionedInputV2"(%arg0, %arg1) {device = "", partition_dims = []} : (tensor<!tf_type.resource<tensor<10x3xf32>>>, tensor<!tf_type.resource<tensor<10x3xf32>>>) -> tensor<!tf_type.resource<tensor<10x3xf32>>>
      // CHECK: [[PI_1:%.*]] = "tf.TPUPartitionedInputV2"([[ARG2]], [[ARG3]])
    - Last Modified: Tue Jan 24 23:08:55 UTC 2023
    - 14.6K bytes
  3. tensorflow/compiler/mlir/tensorflow/transforms/tpu_resource_partitioning.cc

          // ClusterFunc's use of the Read is replaced with use of the
          // TPUPartitionedInputV2.
          read_use.set(new_partitioned_input);
        } else {
          // Outside compiled code's use of the Read after TPUPartitionedInputV2 is
          // replaced with use of the first Read before the TPUPartitionedInputV2.
          if (sharding.type() != xla::OpSharding::REPLICATED) {
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 11.8K bytes
  4. tensorflow/compiler/mlir/tensorflow/tests/tpu_sharding_identification.mlir

    // replicate sharding ("").
    
    // CHECK-LABEL: func @partitioned_input_output
    func.func @partitioned_input_output(%arg0: tensor<*xi32>, %arg1: tensor<*xi32>) -> (tensor<*xi32>, tensor<*xi32>) {
      %0 = "tf.TPUPartitionedInputV2"(%arg0) {_XlaSharding = "\01\02\03", partition_dims = []} : (tensor<*xi32>) -> tensor<*xi32>
      // CHECK:      tf_device.cluster_func
      // CHECK-SAME: input_sharding_configuration = ["\01\02\03", ""]
    - Last Modified: Tue Feb 20 19:07:52 UTC 2024
    - 47.5K bytes
  5. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_rewrite_pass.cc

              }
            }
          }
        }
      }
      for (auto cluster_operand : cluster.getOperands()) {
        Operation* def = cluster_operand.getDefiningOp();
        // This pass assumes that a TPUPartitionedInputV2 is preceded by
        // ReadVariable ops, and not vice versa. An earlier pass,
        // TPUResourceReadsWritesPartitioning, should have ensured this
        // precondition.
        if (!def) continue;
    - Last Modified: Tue Apr 30 21:25:12 UTC 2024
    - 29.7K bytes
  6. tensorflow/compiler/mlir/tf2xla/internal/passes/tpu_cluster_formation.cc

              }
            }
            // When model parallelism is used in conjunction with data parallelism
            // for resource inputs, we need to collect the per replica resource
            // inputs from input to `tf.TPUPartitionedInputV2` ops.
            if (auto pi = llvm::dyn_cast_or_null<mlir::TF::TPUPartitionedInputV2Op>(
                    def)) {
              if (pi->getNumOperands() != num_cores_per_replica)
                status = pi.emitOpError()
    - Last Modified: Thu May 02 22:03:30 UTC 2024
    - 39.3K bytes
  7. tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc

                            input_index, tiled_input_size, num_cores_per_replica));
        };
    
        // If input is already partitioned using the `tf.TPUPartitionedInputV2` op,
        // only replicated sharding is supported where i-th operand to
        // `tf.TPUPartitionedInputV2` op is input to the i-th logical device.
        if (auto partitioned_input =
                llvm::dyn_cast_or_null<mlir::TF::TPUPartitionedInputV2Op>(
    - Last Modified: Wed May 22 21:28:13 UTC 2024
    - 34K bytes
  8. tensorflow/compiler/mlir/tensorflow/transforms/passes.h

    // attributes back to legacy attributes.
    std::unique_ptr<OperationPass<func::FuncOp>>
    CreateConvertToLegacyCompileAndReplicateAttributesPass();
    
    // Creates a pass that converts all TPUPartitionedInput to TPUPartitionedInputV2
    std::unique_ptr<OperationPass<func::FuncOp>>
    CreateTPUPartitionedOpConversionPass();
    
    std::unique_ptr<OperationPass<ModuleOp>> CreateTPUValidateInputsPass();
    
    - Last Modified: Wed Jun 12 21:18:05 UTC 2024
    - 31.8K bytes