Results 1 - 6 of 6 for log1p (0.04 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir

      // CHECK: %[[LOG1P:.*]] = "tf.Log1p"(%[[CONCAT]]) {device = "/job:localhost/replica:0/task:0/device:GPU:0"}
      // CHECK: return %[[LOG1P]]
      %0 = "tf.Log1p"(%arg0) : (tensor<?x1xf32>) -> tensor<?x1xf32>
      %1 = "tf.Log1p"(%arg1) : (tensor<?x1xf32>) -> tensor<?x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 132.1K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc

    // Hoist coefficient-wise unary operation out of the Concat op:
    //
    //   %0 = "tf.Log1p"(%arg_0)
    //   %1 = "tf.Log1p"(%arg_1)
    //   ...
    //   %n = "tf.Log1p"(%arg_n)
    //   %m = "tf.ConcatV2"(%0, %1, ..., %n, %axis)
    //
    // Rewrite it to:
    //
    //   %0 = "tf.ConcatV2"(%arg_0, %arg_1, ..., %arg_n, %axis)
    //   %1 = "tf.Log1p"(%0)
    class HoistCwiseUnaryOutOfConcat : public OpRewritePattern<TF::ConcatV2Op> {
     public:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 146.7K bytes
    - Viewed (0)
  3. src/cmd/compile/internal/ssa/_gen/generic.rules

    (Div16 <t> n (Const16 [c])) && isPowerOfTwo16(c) =>
      (Rsh16x64
        (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [int64(16-log16(c))])))
        (Const64 <typ.UInt64> [int64(log16(c))]))
    (Div32 <t> n (Const32 [c])) && isPowerOfTwo32(c) =>
      (Rsh32x64
        (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [int64(32-log32(c))])))
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 16 22:21:05 UTC 2024
    - 135.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/ir/tfl_ops.cc

      };
      return ConstFoldUnaryOp(result_type, operands[0], compute);
    }
    
    //===----------------------------------------------------------------------===//
    // LogOp
    //===----------------------------------------------------------------------===//
    
    OpFoldResult LogOp::fold(FoldAdaptor adaptor) {
      auto operands = adaptor.getOperands();
      Type result_type = getType();
      // Only constant fold for tensor of f32 is implemented.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 169.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/transforms/optimize.cc

                                    PatternRewriter &rewriter) const override {
        if (sub_op.getFusedActivationFunction() != "NONE") {
          return failure();
        }
        auto log_op = dyn_cast_or_null<TFL::LogOp>(sub_op.getRhs().getDefiningOp());
        if (!log_op || !log_op->hasOneUse()) {
          return failure();
        }
        auto sum_op = dyn_cast_or_null<TFL::SumOp>(log_op.getX().getDefiningOp());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 102.3K bytes
    - Viewed (0)
  6. src/cmd/internal/obj/arm64/asm7.go

    			o1 = c.opxrrr(p, p.As, rt, r, obj.REG_NONE, true)
    			o1 |= c.encRegShiftOrExt(p, &p.From, p.From.Reg) /* includes reg, op, etc */
    		} else {
    			o1 = c.opxrrr(p, p.As, rt, r, rf, false)
    		}
    
    	case 28: /* logop $vcon, [R], R (64 bit literal) */
    		if p.Reg == REGTMP {
    			c.ctxt.Diag("cannot use REGTMP as source: %v\n", p)
    		}
    		o := uint32(0)
    		num := uint8(0)
    		cls := int(p.From.Class)
    		if isANDWop(p.As) {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 15 15:44:14 UTC 2024
    - 201.1K bytes
    - Viewed (0)
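
Result 2 documents the rewrite that the FileCheck directives in result 1 appear to exercise: because tf.Log1p is applied element-wise, applying it once to the concatenation of the inputs is equivalent to applying it to each input and concatenating the results, so the unary op can be hoisted above the ConcatV2. A minimal standalone Go sketch of that equivalence on plain slices (the helper name applyLog1p is mine; this is not the MLIR pattern itself):

    package main

    import (
        "fmt"
        "math"
    )

    // applyLog1p maps math.Log1p over a slice, standing in for an
    // element-wise unary op such as tf.Log1p.
    func applyLog1p(xs []float64) []float64 {
        out := make([]float64, len(xs))
        for i, x := range xs {
            out[i] = math.Log1p(x)
        }
        return out
    }

    func main() {
        a := []float64{0.5, 1.0}
        b := []float64{2.0, 3.0}

        // Before the rewrite: unary op on each input, then concatenate.
        before := append(applyLog1p(a), applyLog1p(b)...)

        // After the rewrite: concatenate first, then a single unary op.
        after := applyLog1p(append(append([]float64{}, a...), b...))

        fmt.Println(before, after) // identical element-wise
    }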
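
Result 3 shows Go's SSA rules turning a signed division by a power-of-two constant c = 1<<k into shifts: the arithmetic shift n>>15 (or n>>31) yields an all-ones word exactly when n is negative, the unsigned shift by 16-k turns that into a bias of c-1, and adding the bias before the final arithmetic shift by k makes the result truncate toward zero, matching Go's division semantics. A standalone check of the 16-bit rule (the function name and exhaustive loop are mine, not part of the compiler):

    package main

    import "fmt"

    // div16ByPow2 mirrors the Div16-by-power-of-two rule with c = 1<<k.
    // n>>15 is -1 for negative n and 0 otherwise; reinterpreted as uint16
    // and shifted right by 16-k it becomes the bias c-1, which makes the
    // final arithmetic shift round toward zero.
    func div16ByPow2(n int16, k uint) int16 {
        bias := int16(uint16(n>>15) >> (16 - k))
        return (n + bias) >> k
    }

    func main() {
        for k := uint(1); k < 15; k++ {
            c := int16(1) << k
            for n := -32768; n <= 32767; n++ {
                x := int16(n)
                if div16ByPow2(x, k) != x/c {
                    fmt.Println("mismatch at", x, c)
                    return
                }
            }
        }
        fmt.Println("shift form matches n/c for all int16 n")
    }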
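
Result 4's LogOp::fold passes a compute callback to ConstFoldUnaryOp and, per the comment, only folds constant f32 tensors. The general shape of such a unary constant fold, sketched in Go with a slice standing in for the tensor attribute (constFoldUnary and its nil-means-not-constant convention are illustrative assumptions, not the TFLite API):

    package main

    import (
        "fmt"
        "math"
    )

    // constFoldUnary applies compute to every element of a constant
    // operand and returns the folded values; a nil operand means the
    // input is not a constant, so the op is left in place.
    func constFoldUnary(operand []float32, compute func(float32) float32) []float32 {
        if operand == nil {
            return nil
        }
        out := make([]float32, len(operand))
        for i, v := range operand {
            out[i] = compute(v)
        }
        return out
    }

    func main() {
        logf := func(x float32) float32 { return float32(math.Log(float64(x))) }
        fmt.Println(constFoldUnary([]float32{1, 10, 100}, logf)) // ln of each element
    }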