Results 11 - 20 of 24 for ClusterFuncOp (0.23 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/mark_input_output_aliases.cc

        assert(aliasing_attr.getInt() == alias_info.output_index);
      }
    }
    
    void MarkInputOutputAliasesPass::runOnOperation() {
      SmallVector<tf_device::ClusterFuncOp, 4> cluster_funcs;
      ModuleOp module = getOperation();
      module.walk([&](tf_device::ClusterFuncOp cluster_func) {
        // Map resource values to pair of input-output indices.
        llvm::DenseMap<Value, AliasInfo> resource_alias_info_map;
    - Last Modified: Tue Mar 05 04:14:26 UTC 2024
    - 7.5K bytes
  2. tensorflow/compiler/mlir/tensorflow/transforms/tpu_space_to_depth_pass.cc

    }
    
    void TPUSpaceToDepthPass::runOnOperation() {
      std::optional<tf_device::ClusterFuncOp> cluster_func;
      // Space to depth only supports training loop.
      auto func_result = getOperation().walk([&](tf_device::ClusterFuncOp cluster) {
        cluster_func = cluster;
        return WalkResult::interrupt();
      });
    
      // Return if there is no tf_device::ClusterFuncOp in training loop.
      if (!func_result.wasInterrupted() || !cluster_func.has_value()) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 29.3K bytes
  3. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_rewrite_pass.cc

        return WalkResult::advance();
      });
      if (result_init.wasInterrupted()) return signalPassFailure();
      llvm::SmallVector<tf_device::ClusterFuncOp> to_be_erased;
      OpBuilder builder(&getContext());
      auto result = getOperation().walk([&](tf_device::ClusterFuncOp op) {
        if (failed(TF::HasValidCompilationAndReplicationAttributes(*op)))
          return WalkResult::interrupt();
        // Skip non-tpu device cluster_func.
    - Last Modified: Tue Apr 30 21:25:12 UTC 2024
    - 29.7K bytes
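
The tpu_rewrite_pass.cc excerpt above relies on MLIR's walk/WalkResult protocol: the callback returns `WalkResult::interrupt()` to abort the traversal, and the pass turns an interrupted walk into a whole-pass failure via `signalPassFailure()`. A minimal sketch of that pattern (the pass name `MyPass` and the `IsValidCluster` predicate are hypothetical, not taken from the search results):

    // Sketch: fail the pass if any ClusterFuncOp does not pass validation.
    void MyPass::runOnOperation() {
      auto result = getOperation().walk([&](tf_device::ClusterFuncOp op) {
        // Abort the traversal on the first invalid cluster_func.
        if (failed(IsValidCluster(op))) return WalkResult::interrupt();
        return WalkResult::advance();
      });
      // An interrupted walk means at least one cluster_func failed the check.
      if (result.wasInterrupted()) return signalPassFailure();
    }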
  4. tensorflow/compiler/mlir/tensorflow/utils/parallel_execute_util.h

    // Wrap `cluster_func` in a `ParallelExecute` with only one child. This
    // can be used to canonicalize IR, so there is always one `ParallelExecute`.
    tf_device::ParallelExecuteOp BuildParallelExecuteOp(
        tf_device::ClusterFuncOp cluster_func, OpBuilder* builder);
    
    // Unwrap `parallel_execute`'s contents if it only has one child.
    LogicalResult RemoveSingletonParallelExecuteOp(
        tf_device::ParallelExecuteOp parallel_execute, OpBuilder* builder);
    
    - Last Modified: Tue Jun 13 03:57:18 UTC 2023
    - 1.7K bytes
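
As a rough illustration of how the two helpers declared in this header could be combined inside a pass, here is a hedged sketch; the surrounding walk, the `builder` setup, and the intermediate rewrite step are assumptions rather than code from the repository:

    // Sketch: wrap each cluster_func, run rewrites that expect the wrapper,
    // then drop the wrapper again while it still has a single child.
    OpBuilder builder(&getContext());
    getOperation().walk([&](tf_device::ClusterFuncOp cluster_func) {
      tf_device::ParallelExecuteOp parallel_execute =
          BuildParallelExecuteOp(cluster_func, &builder);
      // ... transformations that rely on the ParallelExecute wrapper ...
      if (failed(RemoveSingletonParallelExecuteOp(parallel_execute, &builder)))
        signalPassFailure();
    });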
  5. tensorflow/compiler/mlir/tensorflow/utils/parallel_execute_util.cc

    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
    
    namespace mlir {
    namespace TF {
    
    tf_device::ParallelExecuteOp BuildParallelExecuteOp(
        tf_device::ClusterFuncOp cluster_func, OpBuilder* builder) {
      const auto output_types = cluster_func.getResultTypes();
      builder->setInsertionPoint(cluster_func);
      auto parallel_execute = builder->create<tf_device::ParallelExecuteOp>(
    - Last Modified: Tue Jun 13 03:57:18 UTC 2023
    - 2.3K bytes
  6. tensorflow/compiler/mlir/tf2xla/internal/passes/tpu_sharding_identification_pass.cc

    // XlaSharding op.
    void IdentifyXlaShardingForComputationInputs(
        const llvm::SmallVector<std::string>& logical_device_vec,
        bool infer_from_computation, mlir::tf_device::ClusterFuncOp cluster_func,
        mlir::func::FuncOp func, Builder* builder,
        OptionalOpShardingVector& sharding_for_args) {
      // Look up function definition from module.
      Block& function_block = func.front();
    
    - Last Modified: Tue Apr 30 02:01:13 UTC 2024
    - 28.9K bytes
  7. tensorflow/compiler/mlir/tensorflow/tests/annotate-parameter-replication.mlir

        %0 = "tf._D"(%arg0, %arg1) : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
        func.return %0 : tensor<?xi32>
      }
    }
    
    // -----
    
    // Tests that a non-replicated ClusterFuncOp is not annotated.
    
    module attributes {tf.versions = {producer = 888 : i32}} {
      // CHECK-LABEL: func @do_not_annotate_without_replicate
      func.func @do_not_annotate_without_replicate(%arg0: tensor<?xi32>) -> tensor<?xi32> {
    - Last Modified: Tue Jul 25 02:54:34 UTC 2023
    - 4.1K bytes
  8. tensorflow/compiler/mlir/tensorflow/transforms/annotate_parameter_replication.cc

        v = op->getOperand(0);
      }
      return v;
    }
    
    void AnnotateParameterReplicationPass::runOnOperation() {
      ModuleOp m = getOperation();
      OpBuilder builder(m.getContext());
      m.walk([&](tf_device::ClusterFuncOp cluster_func) {
        auto replicate = cluster_func->getParentOfType<tf_device::ReplicateOp>();
        if (!replicate) return;
        auto mirrored_variable_indices_attr =
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 4.1K bytes
  9. tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf_test.cc

          /*is_in_fallback_enabled_mode=*/false));
    
      FuncOp main = mlir_module_->lookupSymbol<mlir::func::FuncOp>("main");
      ASSERT_TRUE(main);
    
      bool has_cluster_op = false;
      main.walk([&](mlir::tf_device::ClusterFuncOp cluster_op) {
        has_cluster_op = true;
        return WalkResult::advance();
      });
    
      EXPECT_TRUE(has_cluster_op);
      EXPECT_EQ(compilation_status.Delta(mlir::TF::kMlirPh1BridgeCounterReplicated,
    - Last Modified: Tue May 28 21:44:37 UTC 2024
    - 6.2K bytes
  10. tensorflow/compiler/mlir/tensorflow/transforms/tpu_annotate_dynamic_shape_inputs.cc

        return block_arg.getOwner()->getParentOp();
    
      return value.getDefiningOp();
    }
    
    void TPUAnnotateDynamicShapeInputsPass::runOnOperation() {
      getOperation().walk([&](tf_device::ClusterFuncOp cluster_func_op) {
        Builder builder(cluster_func_op->getContext());
        // Skip non-tpu device cluster_func.
        auto cluster_id =
            cluster_func_op->getAttrOfType<StringAttr>(TF::kReplicationInfoAttr);
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.2K bytes
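
Both this excerpt and the tpu_rewrite_pass.cc excerpt above are cut off right around the "skip non-tpu device cluster_func" check. A plausible completion of that check, given as a sketch under the assumption that a cluster_func without the replication-info attribute is treated as non-TPU; the file's actual continuation may differ:

    // Sketch (assumed continuation): skip cluster_funcs that do not carry
    // TF::kReplicationInfoAttr, i.e. ones not targeting a TPU device.
    auto cluster_id =
        cluster_func_op->getAttrOfType<StringAttr>(TF::kReplicationInfoAttr);
    if (!cluster_id) return WalkResult::advance();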