Results 1 - 10 of 31 for MulOp (0.05 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold_test.cc

      ASSERT_THAT(test_func, NotNull());
    
      Operation* op_to_fold = FindOperationOfType<TF::MulOp>(test_func);
      SmallVector<Value> results = ConstantFoldOpIfPossible(op_to_fold);
      EXPECT_THAT(results, SizeIs(1));
      // No constant-folding since the second operand is an argument.
      TF::MulOp mul_op = dyn_cast_or_null<TF::MulOp>(results[0].getDefiningOp());
      EXPECT_THAT(mul_op, NotNull());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 04 07:19:09 UTC 2024
    - 10.1K bytes
    - Viewed (0)
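A minimal sketch of the guard this test exercises: an op is only a folding candidate when every operand is produced by a constant op, so the block-argument operand here leaves the TF::MulOp in place. The helper name AllOperandsAreConstant is illustrative, not the actual ConstantFoldOpIfPossible implementation.

    // Sketch only: checks whether every operand of `op` comes from a
    // ConstantLike op. Block arguments have no defining op and never match.
    #include "mlir/IR/Matchers.h"   // mlir::matchPattern, mlir::m_Constant
    #include "mlir/IR/Operation.h"

    static bool AllOperandsAreConstant(mlir::Operation *op) {
      for (mlir::Value operand : op->getOperands()) {
        if (!mlir::matchPattern(operand, mlir::m_Constant())) return false;
      }
      return true;
    }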
  2. tensorflow/compiler/mlir/tensorflow/ir/tf_arith_ops_folder.h

    #include "mlir/IR/Value.h"  // from @llvm-project
    #include "mlir/Support/LLVM.h"  // from @llvm-project
    
    namespace mlir {
    
    class Operation;
    
    namespace TF {
    
    class AddV2Op;
    class SubOp;
    class MulOp;
    class DivOp;
    class RealDivOp;
    
// Verifies a reduction op's `input` and reduction `dims`.
    LogicalResult VerifyReductionInputAndDims(Value input, Value dims,
                                              Location loc);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.3K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/stablehlo/transforms/fuse_convolution_pass.cc

    #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
    
    namespace mlir {
    namespace odml {
    
    class FuseMhloMulAndConvolutionPattern : public OpRewritePattern<mhlo::MulOp> {
     public:
      using OpRewritePattern<mhlo::MulOp>::OpRewritePattern;
    
      LogicalResult matchAndRewrite(mhlo::MulOp mul_op,
                                    PatternRewriter &rewriter) const override {
        // Variables for capturing values and attributes used while creating ops.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 22:21:19 UTC 2024
    - 8.3K bytes
    - Viewed (0)
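Both this pass and result 7 below follow the standard OpRewritePattern contract: match the anchor op, bail out with a failure if the surrounding IR does not fit, otherwise build the replacement and return success. A self-contained skeleton on mhlo::MulOp; the pattern name and the trivial "drop a multiply by a splat 1.0" rewrite are illustrative assumptions, not the fuse_convolution logic.

    #include "mlir/IR/Matchers.h"
    #include "mlir/IR/PatternMatch.h"
    #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"

    namespace mlir {

    // Illustrative pattern: erase `x * 1.0` by forwarding `x`.
    struct DropMulByOne : public OpRewritePattern<mhlo::MulOp> {
      using OpRewritePattern<mhlo::MulOp>::OpRewritePattern;

      LogicalResult matchAndRewrite(mhlo::MulOp mul_op,
                                    PatternRewriter &rewriter) const override {
        // Match: the rhs must be a constant splat of exactly 1.0.
        if (!matchPattern(mul_op.getRhs(), m_OneFloat()))
          return rewriter.notifyMatchFailure(mul_op, "rhs is not a splat 1.0");
        // Rewrite: replace the multiply with its lhs and let DCE clean up.
        rewriter.replaceOp(mul_op, mul_op.getLhs());
        return success();
      }
    };

    }  // namespace mlir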
  4. tensorflow/compiler/mlir/lite/stablehlo/odml_converter/transforms/outline_composites.cc

        auto rhs_mul = llvm::dyn_cast_or_null<stablehlo::MulOp>(erf_input);
        if (!rhs_mul) return failure();
    
        auto lhs_mul =
            llvm::dyn_cast_or_null<stablehlo::MulOp>(erf_user_user_input);
        if (!lhs_mul) return failure();
    
        auto output_mul = llvm::dyn_cast_or_null<stablehlo::MulOp>(erf_user_user);
        if (!output_mul) return failure();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 9.6K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/stablehlo/transforms/fold_broadcast_pass.cc

        return success();
      }
    };
    
    using FoldBroadcastInDimBeforeMulOp =
        FoldBroadcastInDimBeforeBinaryElementwiseOp<mhlo::MulOp>;
    
// Constant folds mhlo.mul; this folder doesn't have an upper limit on how many
// elements can be folded.
    LogicalResult ConstantFoldMul(mhlo::MulOp op, PatternRewriter &rewriter) {
      ShapedType type = mlir::dyn_cast<ShapedType>(op.getType());
      Type etype = type.getElementType();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.5K bytes
    - Viewed (0)
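A plain-C++ sketch of the elementwise fold ConstantFoldMul performs once both operands are known constants; the MLIR attribute plumbing is omitted and the function name is illustrative.

    #include <cstddef>
    #include <vector>

    // Fold two constant buffers elementwise; as the comment above notes, there
    // is no upper bound on the number of elements folded.
    std::vector<float> FoldElementwiseMul(const std::vector<float> &lhs,
                                          const std::vector<float> &rhs) {
      std::vector<float> out(lhs.size());
      for (std::size_t i = 0; i < lhs.size(); ++i) out[i] = lhs[i] * rhs[i];
      return out;
    }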
  6. tensorflow/compiler/mlir/lite/stablehlo/transforms/unfuse_batch_norm_pass.cc

        multiplier = rewriter.create<mhlo::RsqrtOp>(bn_op.getLoc(), multiplier);
        multiplier = rewriter.create<mhlo::MulOp>(bn_op.getLoc(), multiplier,
                                                  bn_op.getScale());
    
        // Compute rhs = offset - mean * multiplier
        Value rhs = rewriter.create<mhlo::MulOp>(bn_op.getLoc(), multiplier,
                                                 bn_op.getMean());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.2K bytes
    - Viewed (0)
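The snippet materializes the usual batch-norm algebra as explicit ops: output = x * multiplier + rhs, with multiplier = scale * rsqrt(variance + epsilon) and rhs = offset - mean * multiplier. A scalar sketch; `variance` and `epsilon` are assumptions, as they do not appear in the excerpt above.

    #include <cmath>

    // Scalar form of the computation the unfused ops perform per element.
    float UnfusedBatchNorm(float x, float scale, float offset, float mean,
                           float variance, float epsilon) {
      // multiplier = scale * rsqrt(variance + epsilon)
      float multiplier = scale / std::sqrt(variance + epsilon);
      // rhs = offset - mean * multiplier, so output = x * multiplier + rhs
      return x * multiplier + (offset - mean * multiplier);
    }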
  7. tensorflow/compiler/mlir/lite/transforms/optimize.cc

    // TODO(b/136285429): Move to tablegen when variadic is supported
    struct FuseFullyConnectedAndMul : public OpRewritePattern<TFL::MulOp> {
      using OpRewritePattern<TFL::MulOp>::OpRewritePattern;
    
      LogicalResult matchAndRewrite(TFL::MulOp mul_op,
                                    PatternRewriter &rewriter) const override {
        // If we are broadcasting on the lhs then don't fold the multiply as it
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 102.3K bytes
    - Viewed (0)
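A scalar sketch of why this fusion (and the TableGen FuseAffineOpAndMul pattern in result 10) is sound: multiplying the output of a fully connected or other affine op by a constant is the same as pre-scaling its weight and bias. The caveat in the comment above is that this only holds when the constant does not broadcast against the lhs.

    // AffineOp (x * weight + bias) followed by MulOp with constant c.
    float AffineThenMul(float x, float weight, float bias, float c) {
      return (x * weight + bias) * c;
    }

    // Equivalent fused form: c folded into weight and bias.
    float FusedAffine(float x, float weight, float bias, float c) {
      return x * (weight * c) + bias * c;
    }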
  8. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc

        auto quant_max_sub =
            rewriter.create<SubOp>(op.getLoc(), quant_max, nudged_zero_point_val);
    
        auto nudged_float_min =
            rewriter.create<MulOp>(op.getLoc(), quant_min_sub, quant_to_float);
    
        auto nudged_float_max =
            rewriter.create<MulOp>(op.getLoc(), quant_max_sub, quant_to_float);
    
        // Now quantize the input value with the approximated min/max values.
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 74.9K bytes
    - Viewed (0)
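A scalar sketch of the nudged-range arithmetic the SubOp/MulOp pairs above build; taking `quant_to_float` to be the dequantization scale is an assumption about the surrounding code, and the struct and function names are illustrative.

    struct NudgedRange { float min, max; };

    // nudged_float_min/max = (quant_min/max - nudged_zero_point) * scale
    NudgedRange ComputeNudgedRange(float quant_min, float quant_max,
                                   float nudged_zero_point,
                                   float quant_to_float) {
      return {(quant_min - nudged_zero_point) * quant_to_float,
              (quant_max - nudged_zero_point) * quant_to_float};
    }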
  9. tensorflow/compiler/mlir/tensorflow/transforms/decompose_resource_ops.cc

                               CastOp::getOperationName(),
                               ConstOp::getOperationName(),
                               LessOp::getOperationName(),
                               MulOp::getOperationName(),
                               PadOp::getOperationName(),
                               PackOp::getOperationName(),
                               ReadVariableOp::getOperationName(),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Nov 03 12:35:38 UTC 2022
    - 8.1K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.td

      [(HasOneUse $bias_add),
       (ReshapableTo1DTensor $add_rhs),
       (HasEqualElementSize<[-1], [-1]> $bias, $add_rhs)]>;
    
// Fuse an AffineOp followed by a MulOp.
    def FuseAffineOpAndMul : Pat<
      (TF_MulOp
        (SupportedAffineOpMatcher $conv_out, $input, $weight),
        (TF_ConstOp:$mul_rhs IsFloatElementsAttr:$mul_rhs_value)),
      (CloneOpWithReplacedOperands
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 8.4K bytes
    - Viewed (0)