Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 11 - 20 of 28 for hasOneUse (0.74 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/passes/replace_stablehlo_ops_in_main_function_with_xla_call_module_ops.cc

            constant_op.getResult().hasOneUse())
          continue;
        // Do not duplicate constant op if the size is too large.
        // 32 is chosen to be larger than all constants useful for shape references,
        // while not too large to possibly significantly increase model size.
        if (constant_op.getValue().getNumElements() > 32) continue;
        while (!constant_op.getResult().hasOneUse()) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 21K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc

    };
    
    bool PrepareQuantizePass::SetInputNodesQuantizationParams(func::FuncOp func) {
      StringRef func_name = func.getName();
      auto has_quantize_op = [&](const Value arg) {
        return (arg.hasOneUse() &&
                llvm::isa<quantfork::QuantizeCastOp>(*arg.user_begin()));
      };
    
      bool need_to_set_input_nodes_quantization_params = false;
      for (const BlockArgument arg : func.getArguments()) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_variable_runtime_reformatting.cc

    Value SkipIdentity(Value v, bool allow_other_use,
                       llvm::SmallPtrSet<Operation*, 4>* skipped = nullptr) {
      while (auto result = mlir::dyn_cast<OpResult>(v)) {
        if (!(allow_other_use || v.hasOneUse())) break;
        auto op = result.getDefiningOp();
        if (!llvm::isa<TF::IdentityOp, TF::IdentityNOp>(op)) {
          break;
        }
        v = op->getOperand(result.getResultNumber());
        if (skipped) skipped->insert(op);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 21.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc

              continue;
            }
            auto result_tensor_type = result_type.cast<TensorType>();
            // If the user is the Quantize op, it must be the only user.
            if (result.hasOneUse() &&
                llvm::isa<quantfork::QuantizeCastOp>(*result.user_begin())) {
              auto user =
                  llvm::cast<quantfork::QuantizeCastOp>(*result.user_begin());
              outputs_replaced.insert(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 23.6K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h

              continue;
            }
            Type result_ele_type =
                mlir::cast<TensorType>(result.getType()).getElementType();
            // If the user is the QuantizeOp, it must be the only user.
            if (result.hasOneUse() && isa<QuantizeOpT>(*result.user_begin())) {
              auto user = cast<QuantizeOpT>(*result.user_begin());
              outputs_replaced.insert(
                  {user.getResult(), enumerated_result.index()});
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.9K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_weight_param.cc

          return failure();
        }
        auto type = mlir::cast<TensorType>(op->getResult(0).getType());
        if (!type || !type.getElementType().isF32()) {
          return failure();
        }
        return success(
            op->hasOneUse() &&
            IsWeightQuantizableFunction(*op->getUses().begin(), type.getRank()));
      }
    
      // Checks if the operand is second operand of `tf.XlaCallModule` op for
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 10.2K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/transforms/tpu_resource_partitioning.cc

      // variable. These `tf.TPUPartitionedInputV2` will be removed when rewriting
      // the operands.
      for (Value result : parallel_execute.getExecuteOutputs()) {
        if (!result.hasOneUse()) continue;
        auto assign_var =
            llvm::dyn_cast<TF::AssignVariableOp>(*result.getUsers().begin());
        if (!assign_var || assign_var.getValue() != result) continue;
        auto partitioned_input =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 11.8K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/transforms/tpu_dynamic_layout_pass.cc

        execute_launches.reserve(compile_launch.getNumResults() - 1);
        for (Value program_result :
             llvm::drop_begin(compile_launch.getResults(), 1)) {
          if (!program_result.hasOneUse()) return;
          Operation* user = *program_result.user_begin();
          auto execute = llvm::dyn_cast<TF::TPUExecuteOp>(user);
          if (!execute) return;
          auto execute_launch =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 12.7K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h

        if (!pre_quantized.hasOneUse()) {
          return failure();
        }
    
        op.emitWarning("Remove trivial `rescale` op. Please fix the source graph.");
    
        llvm::SmallVector<Type, 4> new_output_types;
        for (auto result : def->getResults()) {
          if (result.hasOneUse() && *result.getUsers().begin() == op) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 41.7K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/duplicate_shape_determining_constants.cc

        Operation* defining_op = curr_operand->get().getDefiningOp();
    
        if (llvm::isa_and_nonnull<TF::ConstOp>(defining_op)) {
          // No need to clone if this is the only use.
          if (defining_op->hasOneUse()) {
            LLVM_DEBUG(llvm::dbgs()
                       << "Not duplicating constant operand since it has only one "
                          "usage. Op: "
                       << curr_operand->getOperandNumber()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 17.5K bytes
    - Viewed (0)
Back to top