Results 41 - 50 of 56 for hasOneUse (0.14 sec)
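
Every hit below leans on the same MLIR idiom: mlir::Value::hasOneUse() (or mlir::Operation::hasOneUse(), which counts the uses of all of an op's results) as a cheap guard that a value has exactly one consumer, after which that consumer can be inspected directly. A minimal sketch of the idiom, assuming only the MLIR headers; the helper name is ours, not TensorFlow's:

    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Value.h"

    // Returns the sole user of `value`, or null when the value has zero
    // users or more than one. Hypothetical helper for illustration only.
    mlir::Operation* GetSingleUser(mlir::Value value) {
      // Value::hasOneUse() is true iff the use list has exactly one
      // entry, so dereferencing the first user below is safe.
      if (!value.hasOneUse()) return nullptr;
      return *value.getUsers().begin();
    }

Results 1, 3, and 9 below are instances of exactly this shape, each followed by a dyn_cast to the op type the pass cares about.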

  1. tensorflow/compiler/mlir/tensorflow/transforms/tpu_resource_partitioning.cc

      // variable. These `tf.TPUPartitionedInputV2` will be removed when rewriting
      // the operands.
      for (Value result : parallel_execute.getExecuteOutputs()) {
        if (!result.hasOneUse()) continue;
        auto assign_var =
            llvm::dyn_cast<TF::AssignVariableOp>(*result.getUsers().begin());
        if (!assign_var || assign_var.getValue() != result) continue;
        auto partitioned_input =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 11.8K bytes
  2. tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc

          LLVM_DEBUG(
              llvm::dbgs()
              << "Failed to match. Output type is expected to be a float. Got: "
              << output_element_type << ".\n");
          return failure();
        }
    
        if (!op->hasOneUse()) {
          LLVM_DEBUG(llvm::dbgs()
                     << "Failed to match op - doesn't have a single use.\n");
          return failure();
        }
    
        auto subtract_op = dyn_cast_or_null<stablehlo::SubtractOp>(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 64.6K bytes
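
The guard in result 2 is the standard way to keep a rewrite pattern conservative: if the intermediate value has more than one consumer, fusing it away would change what the other consumers see, so the match simply fails. A sketch of that shape, with MyOp as a placeholder op type (not a real TensorFlow op):

    #include "mlir/IR/PatternMatch.h"
    #include "mlir/Support/LogicalResult.h"

    // Hypothetical pattern: only rewrite `MyOp` when its result feeds a
    // single consumer, mirroring the bail-out in result 2.
    struct FuseSingleUseOp : mlir::OpRewritePattern<MyOp> {
      using mlir::OpRewritePattern<MyOp>::OpRewritePattern;

      mlir::LogicalResult matchAndRewrite(
          MyOp op, mlir::PatternRewriter& rewriter) const override {
        // A second consumer would still observe the pre-rewrite value,
        // so refuse to match rather than produce an unsound fusion.
        if (!op->hasOneUse()) return mlir::failure();
        // ... rewrite using the unique user of `op` ...
        return mlir::success();
      }
    };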
  3. tensorflow/compiler/mlir/tensorflow/transforms/tpu_dynamic_layout_pass.cc

        execute_launches.reserve(compile_launch.getNumResults() - 1);
        for (Value program_result :
             llvm::drop_begin(compile_launch.getResults(), 1)) {
          if (!program_result.hasOneUse()) return;
          Operation* user = *program_result.user_begin();
          auto execute = llvm::dyn_cast<TF::TPUExecuteOp>(user);
          if (!execute) return;
          auto execute_launch =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 12.7K bytes
  4. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h

        if (!pre_quantized.hasOneUse()) {
          return failure();
        }
    
        op.emitWarning("Remove trivial `rescale` op. Please fix the source graph.");
    
        llvm::SmallVector<Type, 4> new_output_types;
        for (auto result : def->getResults()) {
          if (result.hasOneUse() && *result.getUsers().begin() == op) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 41.7K bytes
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/duplicate_shape_determining_constants.cc

        Operation* defining_op = curr_operand->get().getDefiningOp();
    
        if (llvm::isa_and_nonnull<TF::ConstOp>(defining_op)) {
          // No need to clone if this is the only use.
          if (defining_op->hasOneUse()) {
            LLVM_DEBUG(llvm::dbgs()
                       << "Not duplicating constant operand since it has only one "
                          "usage. Op: "
                       << curr_operand->getOperandNumber()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 17.5K bytes
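
Result 5 inverts the usual direction: there, hasOneUse() lets the pass skip work, since a constant with a single use can be specialized in place, while a shared constant must be cloned so its other users are left untouched. A rough sketch of that decision, using arith::ConstantOp as a stand-in for TF::ConstOp:

    #include "llvm/Support/Casting.h"
    #include "mlir/Dialect/Arith/IR/Arith.h"
    #include "mlir/IR/Builders.h"

    // Give `operand` a private copy of its defining constant, but only
    // when other users share it. Illustrative sketch, not the actual pass.
    void MaybeDuplicateConstOperand(mlir::OpOperand& operand,
                                    mlir::OpBuilder& builder) {
      mlir::Operation* defining_op = operand.get().getDefiningOp();
      if (!llvm::isa_and_nonnull<mlir::arith::ConstantOp>(defining_op)) return;
      // Sole use: this operand already owns the constant outright.
      if (defining_op->hasOneUse()) return;
      // Shared constant: clone it so this use can diverge safely.
      builder.setInsertionPointAfter(defining_op);
      operand.set(builder.clone(*defining_op)->getResult(0));
    }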
  6. tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting_cleanup.cc

        // We can eliminate a result if it's unused, the corresponding argument
        // is unused in cond, and the only use in body is as a return value.
        if (cond_arg.use_empty() && body_arg.hasOneUse() &&
            body_arg.use_begin()->getOperandNumber() == result_idx &&
            body_arg.use_begin()->getOwner() == body_ret) {
          can_eliminate.set(result_idx);
        }
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 19.7K bytes
  7. tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc

        if (perm.getValue() != permutation_op.getValue()) return;
    
        // Add a transpose operation for later reuse only if it's used once.
        if (transpose.getResult().hasOneUse()) transpose_ops.push_back(transpose);
      }
    
      // Nothing to do here.
      if (!permutation_op) return;
    
      // All results after transpose must preserve the original result type.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 19.3K bytes
  8. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_merge_variables_with_execute.cc

        // TODO(lyandy): Handle updates to resource writes by remapping to parent
        // launch result and checking if launch result is an AssignVariableOp.
        auto result = execute_output.value();
        if (!result.hasOneUse()) {
          if (VLOG_IS_ON(2)) {
            bool any_user_is_assign = false;
            for (auto result_user : result.getUsers()) {
              any_user_is_assign |= llvm::isa<TF::AssignVariableOp>(result_user);
            }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 29 17:52:11 UTC 2024
    - 27K bytes
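
When the single-use check fails, result 8 still walks all users to report whether any of them is a resource write. That loop can be stated with llvm::any_of; a sketch, assuming the TensorFlow dialect headers:

    #include "llvm/ADT/STLExtras.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

    // True when at least one user of `result` is an AssignVariableOp,
    // matching the diagnostic in result 8. Illustrative helper only.
    bool AnyUserIsAssign(mlir::Value result) {
      return llvm::any_of(result.getUsers(), [](mlir::Operation* user) {
        return llvm::isa<mlir::TF::AssignVariableOp>(user);
      });
    }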
  9. tensorflow/compiler/mlir/tf2xla/internal/passes/tpu_sharding_identification_pass.cc

    // connected to a `tf_device.cluster_func` result value (via AssignVariableOp/
    // resource write).
    mlir::Operation* GetXlaShardingFromResult(Value value) {
      if (!value.hasOneUse()) return nullptr;
    
      Operation* user = *value.getUsers().begin();
      if (auto partitioned_output =
              llvm::dyn_cast<mlir::TF::TPUPartitionedOutputV2Op>(user))
        return NullUnlessSharded(partitioned_output);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 02:01:13 UTC 2024
    - 28.9K bytes
  10. tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc

                      user)) {
            partitioned_output = partitioned_output_user;
            break;
          }
        }
        if (partitioned_output) {
          if (!old_parallel_execute_output.hasOneUse())
            return partitioned_output.emitOpError()
                   << "must be a unique user of TPU Cluster "
                      "(tf_device.old_parallel_execute) output "
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 22 21:28:13 UTC 2024
    - 34K bytes