- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 28 for cluster_func (0.17 sec)
-
tensorflow/compiler/mlir/tensorflow/transforms/tpu_resource_read_for_write.cc
operands.append(read_operands.begin(), read_operands.end()); auto loc = cluster_func.getLoc(); auto new_cluster_func = builder.create<tf_device::ClusterFuncOp>( loc, cluster_func.getResultTypes(), operands, cluster_func->getAttrs()); cluster_func.replaceAllUsesWith(new_cluster_func); func::FuncOp func = cluster_func.getFuncOp(); Block& block = func.front();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 16:54:40 UTC 2024 - 5.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_rewrite_pass.cc
cluster_func->getParentOfType<tf_device::ParallelExecuteOp>(); if (old_parallel_execute && cluster_func->getParentOp() != old_parallel_execute) { cluster_func->emitError() << "The ParallelExecute ancestor of a " "ClusterFunc must be its direct parent."; return failure(); } if (!old_parallel_execute) old_parallel_execute = TF::BuildParallelExecuteOp(cluster_func, builder);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 21:25:12 UTC 2024 - 29.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_sharding_identification.mlir
// CHECK: tf_device.cluster_func // CHECK-SAME: input_sharding_configuration = ["\01\02\03"] // CHECK-SAME: output_sharding_configuration = [] "tf_device.cluster_func"(%1) {func = @cluster_func, use_spmd_for_xla_partitioning = true, num_cores_per_replica = 1 : i64} : (tensor<*xf32>) -> () func.return } // CHECK-LABEL: func @cluster_func
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Feb 20 19:07:52 UTC 2024 - 47.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/mark_input_output_aliases.cc
void MarkInputOutputAliasesPass::runOnOperation() { SmallVector<tf_device::ClusterFuncOp, 4> cluster_funcs; ModuleOp module = getOperation(); module.walk([&](tf_device::ClusterFuncOp cluster_func) { // Map resource values to pair of input-output indices. llvm::DenseMap<Value, AliasInfo> resource_alias_info_map; if (failed(BuildAliasingInfo(cluster_func, resource_alias_info_map)) || resource_alias_info_map.empty()) { return;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 04:14:26 UTC 2024 - 7.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/tpu_sharding_identification_pass.cc
} // Extracts input/output sharding configuration of `cluster_func` by parsing // XlaSharding ops inside the `cluster_func`. LogicalResult IdentifyXlaShardingForTPUComputation( Builder* builder, mlir::tf_device::ClusterFuncOp cluster_func) { // Look up function definition from module. mlir::func::FuncOp func = cluster_func->getParentOfType<ModuleOp>()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 02:01:13 UTC 2024 - 28.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/cluster_outlining.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 21:25:12 UTC 2024 - 4.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc
mlir::tf_device::ClusterFuncOp cluster_func, mlir::SmallVector<xla::OpSharding, 4>* output_sharding_list) { output_sharding_list->reserve(cluster_func.getNumResults()); const auto output_sharding_attrs = cluster_func.getOperation()->getAttrOfType<mlir::ArrayAttr>( kOutputShardingAttr); if (!output_sharding_attrs) return cluster_func.emitError(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 22 21:28:13 UTC 2024 - 34K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu-resource-read-for-write.mlir
// CHECK-NEXT: [[READ:%.*]] = "tf.ReadVariableOp"([[ARG2]]) // CHECK-NEXT: [[CLUSTER:%.*]]:2 = "tf_device.cluster_func"([[ARG0]], [[ARG1]], [[READ]]) // CHECK-SAME: _replication_info = "write", _xla_compile_device_type = "TPU" %0:2 = "tf_device.cluster_func"(%arg0, %arg1) {_replication_info = "write", _xla_compile_device_type = "TPU", func = @write_func} : (tensor<i32>, tensor<f32>) -> (tensor<f32>, tensor<i32>)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 16:54:40 UTC 2024 - 5.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_resource_partitioning.mlir
%1 = "tf.ReadVariableOp"(%0) : (tensor<!tf_type.resource<tensor<i32>>>) -> tensor<i32> // CHECK: [[COMPUTATION:%.+]] = "tf_device.cluster_func"([[INPUT]]) %2 = "tf_device.cluster_func"(%1) {func = @computation, use_spmd_for_xla_partitioning = true} : (tensor<i32>) -> tensor<i32> // CHECK: [[OUTPUT:%.+]]:2 = "tf.TPUPartitionedOutputV2"([[COMPUTATION]]) // CHECK-SAME: _XlaSharding = ""
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 23 23:53:20 UTC 2024 - 15.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/runtime_passes.td
let summary = "Rewrites a `tf_device.cluster_func` on TPUs into TPU runtime operations."; let description = [{ This pass rewrites a `tf_device.cluster_func` operation into a sequence of `tf._TPUCompileMlir` and `tf.TPUExecute` operations. `tf._TPUCompileMlir` contains a MLIR module that is functionally equivalent to the function referenced by `tf_device.cluster_func`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jan 10 18:58:57 UTC 2024 - 10.7K bytes - Viewed (0)