Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 10 of 56 for hasOneUse (0.14 sec)

  1. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

       (FloatValueEquals<"0.797884583"> $Cst_sqrt_2dPi),
       (FloatValueEquals<"0.044715"> $Coeff),
       (HasOneUse $mul_out),
       (HasOneUse $add_out),
       (HasOneUse $tanh_out),
       (HasOneUse $mul_out1),
       (HasOneUse $add_out1),
       (HasOneUse $mul_out2),
       (HasOneUse $pow_out),
      ]>;
    
    // Alternate pattern for GeluApproximate (see different order for mul), replaces
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/transforms/optimize.td

    def DefinedByConv2D : Constraint<CPred<"llvm::isa_and_nonnull<mlir::TF::Conv2DOp>($0.getDefiningOp())">>;
    // Checks if the value has only one user.
    def HasOneUse : Constraint<CPred<"$0.hasOneUse()">>;
    
    // If we see a Conv2D op followed by Mul, then multiply the filter
    // with the value in Mul.
    def FuseMulAndConv2D :
      Pat<(TF_MulOp:$mul (TF_Conv2DOp:$conv $input,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 22 07:31:23 UTC 2023
    - 5.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.td

          $input,
          (MultiplyFakeQuantValue $weight,
            (MakeOneDimValueBroadcastable $mul_rhs, $weight))),
        (MultiplyFakeQuantValue $bias, $mul_rhs), $data_format),
      [(HasOneUse $conv_out),
       (HasOneUse $bias_add),
       (HasRankOf<1> $mul_rhs_value),
       (HasStaticShapeConstraint $weight),
       (CanBeSymmetricallyQuantized $weight),
       (CanBeSymmetricallyQuantized $bias),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 8.4K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/transforms/dilated_conv.h

        squeeze_op = llvm::cast<TF::SqueezeOp>(consumer_op);
        if (!expand_op.getResult().hasOneUse()) {
          return rewriter.notifyMatchFailure(
              expand_op, "result for current op has more than 1 use");
        }
        if (!squeeze_op.getResult().hasOneUse()) {
          return rewriter.notifyMatchFailure(
              squeeze_op, "result for current op has more than 1 use");
        }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/transforms/mark_input_output_aliases.cc

                device_return->getParentRegion()->getRegionNumber());
            if (operand_idx >= execute_results.size()) return nullptr;
    
            auto result_from_use = execute_results[operand_idx];
            if (!result_from_use.hasOneUse()) return nullptr;
    
            device_return = result_from_use.use_begin()->getOwner();
            if (!device_return) return nullptr;
          }
        } else {
          LLVM_DEBUG(llvm::dbgs()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 04:14:26 UTC 2024
    - 7.5K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/transforms/gpu_fusion.cc

    // For the second pattern, there is no good way in the framework to handle the
    // commutativity of the AddV2: we want the FusedBatchNormV3 on any side.
    // Also we need some native calls to handle the "hasOneUse" aspects and the
    // optional extra operands for the AddV2 case.
    struct ReluToFusedBatchNorm : public OpRewritePattern<ReluOp> {
      using OpRewritePattern<ReluOp>::OpRewritePattern;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Nov 03 12:35:38 UTC 2022
    - 5.2K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/stablehlo/transforms/optimize.cc

          first.getStrides().getSplatValue<IntegerAttr>().getInt() != 1 ||
          first.getStrides() != second.getStrides())
        return rewriter.notifyMatchFailure(concat, "slice ops must have stride=1");
      if (!first->hasOneUse() || !second->hasOneUse())
        return rewriter.notifyMatchFailure(concat, "slice ops are used elsewhere");
    
      SmallVector<int64_t> new_start;
      SmallVector<int64_t> new_limit;
      SmallVector<int64_t> new_slice_shape;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 26.9K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/transforms/push_transpose_through_ewise.cc

          return failure();
        }
    
        // Compute inverse of input transpose.
        llvm::SmallVector<int32_t> inverse_perm_arr = InvertPermutation(perm1_arr);
    
        if (!(tpose_arg1->hasOneUse() || tpose_arg2->hasOneUse())) {
          return failure();
        }
    
        auto current_out_type =
            llvm::dyn_cast<RankedTensorType>(op->getResult(0).getType());
        auto new_out_type = RankedTensorType::get(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 12.5K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/stablehlo/transforms/optimize_layout.cc

        RankedTensorType pad_type = pad_op.getType().cast<RankedTensorType>();
    
        auto transpose_op = pad_input.getDefiningOp<stablehlo::TransposeOp>();
        if (!transpose_op || !transpose_op->hasOneUse()) return failure();
        Value transpose_input = transpose_op.getOperand();
    
        ArrayRef<int64_t> transpose_perm = transpose_op.getPermutation();
        SmallVector<int64_t> new_padding_low =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 21:59:06 UTC 2024
    - 8.6K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc

        }
    
        // If the contraction is used in multiple places, fusing it will only create
        // more contraction nodes, which is slower.
        if (!contraction.getResult().hasOneUse())
          return rewriter.notifyMatchFailure(contraction,
                                             "result is used by multiple ops");
    
        BiasAddOp bias_add = GetBiasAdd(contraction.getResult());
        if (!bias_add) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 14.9K bytes
    - Viewed (0)
Back to top