Results 11 - 20 of 248 for getUses (0.27 sec)

  1. tensorflow/compiler/mlir/tf2xla/internal/passes/hoist_broadcast_read.cc

        Value res = read.getResource();
        Operation* scope = res.getParentBlock()->getParentOp();
        if (!scope->isProperAncestor(replicate)) continue;
        bool has_conflicting_write = false;
        for (OpOperand& use : res.getUses()) {
          Operation* using_op = use.getOwner();
          if (using_op == read) continue;
          if (!replicate->isProperAncestor(using_op)) continue;
          Operation* peer = GetAncestorBelow(using_op, replicate);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.3K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/transforms/cluster_formation.cc

      for (const auto& p : llvm::zip(live_outs, launch_op.getResults())) {
        Value from = std::get<0>(p);
        // TODO(jingpu): move this to RegionUtils.h in MLIR core.
        for (auto& use : llvm::make_early_inc_range(from.getUses())) {
          if (launch_op_region->isAncestor(use.getOwner()->getParentRegion()))
            continue;
          use.set(std::get<1>(p));
        }
      }
    }
    
    // Get all escaped live-out values of a region.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Oct 05 13:30:21 UTC 2023
    - 6.8K bytes
    - Viewed (0)
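
The cluster_formation.cc snippet above redirects every use of a live-out value that escapes the launch region to the corresponding launch result. Below is a minimal standalone sketch of that rewiring pattern, assuming standard MLIR headers; the helper name replaceUsesOutsideRegion is hypothetical and not taken from the TensorFlow sources.

    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Region.h"
    #include "mlir/IR/Value.h"
    #include "llvm/ADT/STLExtras.h"

    // Redirect every use of `from` that lives outside `region` to `to`.
    // llvm::make_early_inc_range keeps the iteration valid while
    // OpOperand::set rewires each use in place.
    void replaceUsesOutsideRegion(mlir::Value from, mlir::Value to,
                                  mlir::Region *region) {
      for (mlir::OpOperand &use : llvm::make_early_inc_range(from.getUses())) {
        if (region->isAncestor(use.getOwner()->getParentRegion()))
          continue;  // The use stays inside the region; leave it alone.
        use.set(to);
      }
    }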
  3. tensorflow/compiler/mlir/tensorflow/transforms/cluster_ops_by_policy.cc

    // -------------------------------------------------------------------------- //
    
    namespace {
    constexpr char kDeviceAttr[] = "device";
    
    // A type that abstracts over types that have uses accessible via `getUses`.
    using Source = PointerUnion<Operation *, BlockArgument *>;
    
    // We use union-find algorithm to build clusters of connected operations based
    // on the user provided policy. If an operation can be clustered (one of the
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 27.9K bytes
    - Viewed (0)
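
The comment in cluster_ops_by_policy.cc is cut off above, but it describes a union-find clustering of connected operations. Purely as an illustration of that idea under assumed names (nothing below comes from cluster_ops_by_policy.cc), here is a hand-rolled union-find that merges an operation with the users of its results.

    #include "mlir/IR/Operation.h"
    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/DenseMap.h"

    // Minimal union-find keyed on operations; every op starts out as its
    // own cluster representative.
    struct OpUnionFind {
      llvm::DenseMap<mlir::Operation *, mlir::Operation *> parent;

      mlir::Operation *find(mlir::Operation *op) {
        auto it = parent.find(op);
        if (it == parent.end() || it->second == op) {
          parent[op] = op;
          return op;
        }
        mlir::Operation *root = find(it->second);
        parent[op] = root;  // Path compression.
        return root;
      }

      void merge(mlir::Operation *a, mlir::Operation *b) {
        mlir::Operation *ra = find(a);
        mlir::Operation *rb = find(b);
        if (ra != rb) parent[ra] = rb;
      }
    };

    // Merge each op with the users of its results, so every use-def
    // connected component collapses into a single cluster.
    void clusterByUses(llvm::ArrayRef<mlir::Operation *> ops, OpUnionFind &uf) {
      for (mlir::Operation *op : ops)
        for (mlir::Value result : op->getResults())
          for (mlir::Operation *user : result.getUsers())
            uf.merge(op, user);
    }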
  4. tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc

    // multiple such activations, one is returned (with no guarantee as to which
    // one). If there are no activation functions that use the output, returns
    // nullptr.
    Operation *GetActivation(Value op) {
      for (auto &use : op.getUses()) {
        if (IsActivationFunction(use.getOwner())) return use.getOwner();
      }
      return nullptr;
    }
    
    // Finds and returns a BiasAdd that uses the result of `op` as the `value`
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 14.9K bytes
    - Viewed (0)
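
GetActivation in the fused_kernel_matcher.cc snippet above is one instance of a common pattern: walk a value's uses and return the first owner that satisfies a predicate. A generalized sketch follows, with the hypothetical name findUserIf and standard MLIR headers assumed.

    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Value.h"
    #include "llvm/ADT/STLExtras.h"

    // Return the first user of `value` for which `pred` holds, or nullptr
    // if no user matches. When several users match, which one is returned
    // depends only on use-list order.
    mlir::Operation *findUserIf(
        mlir::Value value, llvm::function_ref<bool(mlir::Operation *)> pred) {
      for (mlir::OpOperand &use : value.getUses())
        if (pred(use.getOwner())) return use.getOwner();
      return nullptr;
    }

    // For example, findUserIf(op, [](mlir::Operation *user) {
    //   return IsActivationFunction(user);
    // }) reproduces the behavior of GetActivation in the snippet above.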
  5. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc

        ShapedType new_result_type = old_result_type.clone(quantized_type);
    
        // Insert CastOp if it does not exist yet. Otherwise, just rewire without
        // creating a CastOp.
        for (auto& connected_op : op.getResult().getUses()) {
          auto cast_op = llvm::dyn_cast_or_null<CastOp>(connected_op.getOwner());
          if (cast_op && cast_op.getType() == new_result_type) {
            quantize_op->setOperand(quantize_operand_num, cast_op);
            return;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20.8K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/stablehlo/transforms/fuse_convolution_pass.cc

          });
        }
        // For dynamic case, the result of conv should be used by shape_of and mul.
        if (is_dynamic_broadcast) {
          auto conv_uses = (*conv_op.getODSResults(0).begin()).getUses();
          if (std::distance(conv_uses.begin(), conv_uses.end()) != 2 ||
              quant::FindUserOfType<shape::ShapeOfOp>(conv_op) == nullptr ||
              quant::FindUserOfType<mhlo::MulOp>(conv_op) == nullptr) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 22:21:19 UTC 2024
    - 8.3K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/transforms/legalize_hashtables.cc

               mlir::isa<IntegerType>(key_dtype) &&
               mlir::cast<IntegerType>(key_dtype).getWidth() == 64))) {
          return false;
        }
    
        for (auto& use : hashtable->getUses()) {
          Operation* user = use.getOwner();
    
          // Allow consuming hash table ops that can be covered by TensorFlow Lite
          // hash table kernels.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.6K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/transforms/group_by_dialect.cc

      }
    
      // All results in our set that have a user outside our set.
      llvm::DenseSet<Value> outputs_seen;
      for (Operation* op : ops) {
        for (Value result : op->getResults()) {
          for (auto& use : result.getUses()) {
            if (!all_operations.contains(use.getOwner())) {
              if (!outputs_seen.contains(result)) {
                outputs->push_back(result);
                outputs_seen.insert(result);
              }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 17 07:31:01 UTC 2023
    - 8K bytes
    - Viewed (0)
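
The nested loop in group_by_dialect.cc above records each result that has a user outside the grouped set of operations. The same idea as a standalone helper is sketched below; the names collectEscapingResults and members are hypothetical, and standard MLIR/LLVM headers are assumed.

    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Value.h"
    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/DenseSet.h"
    #include "llvm/ADT/SmallVector.h"

    // Append every result of the ops in `cluster` that has at least one
    // user outside `members`; `seen` keeps a value from being added twice.
    void collectEscapingResults(llvm::ArrayRef<mlir::Operation *> cluster,
                                const llvm::DenseSet<mlir::Operation *> &members,
                                llvm::SmallVectorImpl<mlir::Value> &outputs) {
      llvm::DenseSet<mlir::Value> seen;
      for (mlir::Operation *op : cluster)
        for (mlir::Value result : op->getResults())
          for (mlir::Operation *user : result.getUsers())
            if (!members.contains(user) && seen.insert(result).second)
              outputs.push_back(result);
    }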
  9. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc

      // terminator.
      bool checkIfAnyUserIsConnectedToTermiantor(BlockArgument op) const {
        for (const auto& user : op.getUsers()) {
          if (user->template hasTrait<OpTrait::IsTerminator>()) return true;
          if (auto next_user = dyn_cast_or_null<TF::IdentityOp>(user)) {
            return (*(next_user->getResult(0).getUsers().begin()))
                ->template hasTrait<OpTrait::IsTerminator>();
          }
        }
        return false;
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 11.3K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/transforms/tpu_resource_partitioning.cc

      xla::OpSharding sharding;
      sharding.ParseFromString(
          old_partitioned_input.get_XlaShardingAttr().getValue().str());
      for (OpOperand& read_use :
           llvm::make_early_inc_range(old_read.getValue().getUses())) {
        if (dyn_cast_or_null<tf_device::ClusterFuncOp>(read_use.getOwner())) {
          // ClusterFunc's use of the Read is replaced with use of the
          // TPUPartitionedInputV2.
          read_use.set(new_partitioned_input);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 11.8K bytes
    - Viewed (0)