Results 1 - 10 of 13 for canonLessThan (0.17 sec)

  1. test/codegen/comparisons.go

    type Point struct {
    	X, Y int
    }
    
    // invertLessThanNoov checks (LessThanNoov (InvertFlags x)) is lowered as
    // CMP, CSET, CSEL instruction sequence. InvertFlags are only generated under
    // certain conditions, see canonLessThan, so if the code below does not
    // generate an InvertFlags OP, this check may fail.
    func invertLessThanNoov(p1, p2, p3 Point) bool {
    	// arm64:`CMP`,`CSET`,`CSEL`
    	return (p1.X-p3.X)*(p2.Y-p3.Y)-(p2.X-p3.X)*(p1.Y-p3.Y) < 0
    }
    - Last Modified: Fri Apr 19 16:31:02 UTC 2024
    - 15.2K bytes
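
    The backquoted strings in the // arm64: comment are regular expressions
    that the codegen test harness matches against the compiled assembly of the
    enclosing function for that architecture. A second, hypothetical assertion
    written in the same style (not part of the original file) might look like:

    // negate checks that integer negation lowers to a NEG instruction
    // on arm64. Hypothetical example in the codegen-comment style.
    func negate(x int64) int64 {
    	// arm64:`NEG`
    	return -x
    }
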
  2. src/cmd/compile/internal/ssa/rewrite.go

    // A shift is bounded if it is shifting by less than the width of the shifted value.
    func shiftIsBounded(v *Value) bool {
    	return v.AuxInt != 0
    }
    
    // canonLessThan returns whether x is "ordered" less than y, for purposes of normalizing
    // generated code as much as possible.
    func canonLessThan(x, y *Value) bool {
    	if x.Op != y.Op {
    		return x.Op < y.Op
    	}
    	if !x.Pos.SameFileAndLine(y.Pos) {
    		return x.Pos.Before(y.Pos)
    	}
    - Last Modified: Fri Jun 07 19:02:52 UTC 2024
    - 64.2K bytes
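
    The snippet above cuts off before canonLessThan's final statement. Given
    the doc comment's goal of a deterministic, total order, a plausible
    reconstruction of the whole function (the tail is an assumption; see
    rewrite.go for the authoritative version) is:

    func canonLessThan(x, y *Value) bool {
    	if x.Op != y.Op {
    		return x.Op < y.Op
    	}
    	if !x.Pos.SameFileAndLine(y.Pos) {
    		return x.Pos.Before(y.Pos)
    	}
    	// Assumed tie-break, elided in the snippet above: fall back to
    	// SSA value IDs so the ordering is total and deterministic.
    	return x.ID < y.ID
    }
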
  3. src/cmd/compile/internal/ssa/rewritePPC64.go

    		v0.AuxInt = int64ToAuxInt(c)
    		v0.AddArg(y)
    		v.AddArg(v0)
    		return true
    	}
    	// match: (CMP x y)
    	// cond: canonLessThan(x,y)
    	// result: (InvertFlags (CMP y x))
    	for {
    		x := v_0
    		y := v_1
    		if !(canonLessThan(x, y)) {
    			break
    		}
    		v.reset(OpPPC64InvertFlags)
    		v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
    		v0.AddArg2(y, x)
    		v.AddArg(v0)
    - Last Modified: Fri Jun 07 19:02:52 UTC 2024
    - 360.2K bytes
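
    InvertFlags records that the flags were computed from swapped operands, so
    any later consumer of those flags must read the mirrored condition:
    less-than becomes greater-than and so on, while equality and inequality are
    unchanged. A minimal sketch of that mirroring, using a hypothetical
    condition type rather than the compiler's actual API:

    type Cond int

    const (
    	CondEQ Cond = iota
    	CondNE
    	CondLT
    	CondLE
    	CondGT
    	CondGE
    )

    // invertCond maps a condition to its operand-swapped mirror: x < y
    // holds exactly when y > x. Hypothetical helper, not compiler API.
    func invertCond(c Cond) Cond {
    	switch c {
    	case CondLT:
    		return CondGT
    	case CondLE:
    		return CondGE
    	case CondGT:
    		return CondLT
    	case CondGE:
    		return CondLE
    	}
    	return c // EQ and NE are symmetric under operand swap
    }
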
  4. src/cmd/compile/internal/ssa/rewrite386.go

    		v0.AuxInt = int8ToAuxInt(int8(c))
    		v0.AddArg(x)
    		v.AddArg(v0)
    		return true
    	}
    	// match: (CMPB x y)
    	// cond: canonLessThan(x,y)
    	// result: (InvertFlags (CMPB y x))
    	for {
    		x := v_0
    		y := v_1
    		if !(canonLessThan(x, y)) {
    			break
    		}
    		v.reset(Op386InvertFlags)
    		v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
    		v0.AddArg2(y, x)
    		v.AddArg(v0)
    - Last Modified: Fri Apr 21 21:05:46 UTC 2023
    - 262.4K bytes
  5. src/cmd/compile/internal/ssa/rewriteS390X.go

    		v0.AuxInt = int32ToAuxInt(int32(c))
    		v0.AddArg(x)
    		v.AddArg(v0)
    		return true
    	}
    	// match: (CMP x y)
    	// cond: canonLessThan(x,y)
    	// result: (InvertFlags (CMP y x))
    	for {
    		x := v_0
    		y := v_1
    		if !(canonLessThan(x, y)) {
    			break
    		}
    		v.reset(OpS390XInvertFlags)
    		v0 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
    		v0.AddArg2(y, x)
    		v.AddArg(v0)
    - Last Modified: Thu Oct 12 18:09:26 UTC 2023
    - 395.1K bytes
  6. src/cmd/compile/internal/ssa/_gen/PPC64.rules

    (CMPWU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPWUconst y [int32(c)]))
    
    // Canonicalize the order of arguments to comparisons - helps with CSE.
    ((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
    
    // n is always a zero-extended uint16 value, so n & z is always a non-negative 32 or 64 bit value.
    // Rewrite to a cmp int64(0) to lower into ANDCCconst in the latelower pass.
    - Last Modified: Fri Jun 07 19:02:52 UTC 2024
    - 53.2K bytes
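
    The ((CMP|CMPW|CMPU|CMPWU) x y) form is rulegen shorthand: the generator
    expands the alternation into one rule per opcode, pairing each alternative
    on the left-hand side with the same alternative on the right. Expanded by
    hand, the canonicalization rule above stands for:

    (CMP x y)   && canonLessThan(x,y) => (InvertFlags (CMP y x))
    (CMPW x y)  && canonLessThan(x,y) => (InvertFlags (CMPW y x))
    (CMPU x y)  && canonLessThan(x,y) => (InvertFlags (CMPU y x))
    (CMPWU x y) && canonLessThan(x,y) => (InvertFlags (CMPWU y x))
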
  7. src/cmd/compile/internal/ssa/_gen/S390X.rules

      => (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)})
    
    // Canonicalize the order of arguments to comparisons - helps with CSE.
    ((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
    
    // Use sign/zero extend instead of RISBGZ.
    (RISBGZ x {r}) && r == s390x.NewRotateParams(56, 63, 0) => (MOVBZreg x)
    - Last Modified: Thu Oct 12 18:09:26 UTC 2023
    - 74.3K bytes
  8. src/cmd/compile/internal/ssa/rewriteAMD64.go

    		v0.AuxInt = int8ToAuxInt(int8(c))
    		v0.AddArg(x)
    		v.AddArg(v0)
    		return true
    	}
    	// match: (CMPB x y)
    	// cond: canonLessThan(x,y)
    	// result: (InvertFlags (CMPB y x))
    	for {
    		x := v_0
    		y := v_1
    		if !(canonLessThan(x, y)) {
    			break
    		}
    		v.reset(OpAMD64InvertFlags)
    		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
    		v0.AddArg2(y, x)
    		v.AddArg(v0)
    - Last Modified: Tue Mar 12 19:38:41 UTC 2024
    - 712.7K bytes
  9. src/cmd/compile/internal/ssa/_gen/AMD64.rules

    (CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)])
    (CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
    
    // Canonicalize the order of arguments to comparisons - helps with CSE.
    (CMP(Q|L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(Q|L|W|B) y x))
    
    // Using MOVZX instead of AND is cheaper.
    (AND(Q|L)const [  0xFF] x) => (MOVBQZX x)
    (AND(Q|L)const [0xFFFF] x) => (MOVWQZX x)
    - Last Modified: Tue Mar 12 19:38:41 UTC 2024
    - 93.9K bytes
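
    As the rule comments say, the point of the canonical order is common
    subexpression elimination: a < b and b < a would otherwise compile to
    CMP a b and CMP b a, two distinct flag values that CSE cannot merge. After
    canonicalization both become the same CMP, with one use wrapped in
    InvertFlags, so a single hardware compare serves both. A minimal
    illustration in ordinary Go (hypothetical function, not from the Go tree):

    // Both results test the same relation with mirrored operands. With a
    // canonical argument order the compiler can emit one compare and read
    // its flags under two different conditions.
    func mirrored(a, b int) (bool, bool) {
    	return a < b, b < a
    }
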
  10. src/cmd/compile/internal/ssa/_gen/ARM64.rules

    (ADDSflags x (MOVDconst [c]))  => (ADDSconstflags [c] x)
    
    (ADDconst [c] y) && c < 0 => (SUBconst [-c] y)
    
    // Canonicalize the order of arguments to comparisons - helps with CSE.
    ((CMP|CMPW) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW) y x))
    
    // mul-neg => mneg
    (NEG  (MUL  x y)) => (MNEG  x y)
    (NEG  (MULW x y)) && v.Type.Size() <= 4 => (MNEGW x y)
    (MUL  (NEG  x) y) => (MNEG  x y)
    - Last Modified: Thu May 23 15:49:20 UTC 2024
    - 113.1K bytes