Results 1 - 6 of 6 for canonLessThan (0.66 sec)
test/codegen/comparisons.go
type Point struct { X, Y int }

// invertLessThanNoov checks (LessThanNoov (InvertFlags x)) is lowered as
// CMP, CSET, CSEL instruction sequence. InvertFlags are only generated under
// certain conditions, see canonLessThan, so if the code below does not
// generate an InvertFlags OP, this check may fail.
func invertLessThanNoov(p1, p2, p3 Point) bool {
    // arm64:`CMP`,`CSET`,`CSEL`
    return (p1.X-p3.X)*(p2.Y-p3.Y)-(p2.X-p3.X)*(p1.Y-p3.Y) < 0
}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 19 16:31:02 UTC 2024 - 15.2K bytes - Viewed (0)
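Note: the comparison in this test is the standard 2D cross-product orientation check. A minimal, runnable sketch of the same expression outside the codegen harness (the clockwise name and the main function are illustrative additions, not part of the test file):

package main

import "fmt"

type Point struct{ X, Y int }

// clockwise reports whether the turn p3 -> p1 -> p2 bends clockwise,
// i.e. whether the cross product of (p1-p3) and (p2-p3) is negative.
// This is the same expression the codegen test expects to compile to
// a CMP/CSET/CSEL sequence on arm64.
func clockwise(p1, p2, p3 Point) bool {
    return (p1.X-p3.X)*(p2.Y-p3.Y)-(p2.X-p3.X)*(p1.Y-p3.Y) < 0
}

func main() {
    fmt.Println(clockwise(Point{1, 0}, Point{0, 1}, Point{0, 0})) // false: counter-clockwise turn
    fmt.Println(clockwise(Point{0, 1}, Point{1, 0}, Point{0, 0})) // true: clockwise turn
}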
src/cmd/compile/internal/ssa/rewrite.go
// A shift is bounded if it is shifting by less than the width of the shifted value.
func shiftIsBounded(v *Value) bool {
    return v.AuxInt != 0
}

// canonLessThan returns whether x is "ordered" less than y, for purposes of normalizing
// generated code as much as possible.
func canonLessThan(x, y *Value) bool {
    if x.Op != y.Op {
        return x.Op < y.Op
    }
    if !x.Pos.SameFileAndLine(y.Pos) {
        return x.Pos.Before(y.Pos)
    }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 64.2K bytes - Viewed (0)
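Note: the snippet above is cut off before the function's final statement. For completeness, a sketch of the whole function as it appears in the upstream rewrite.go (the closing tie-breaker on value IDs comes from the Go source tree, not from the excerpt above, so treat it as an assumption about this revision):

func canonLessThan(x, y *Value) bool {
    if x.Op != y.Op {
        return x.Op < y.Op // order first by opcode
    }
    if !x.Pos.SameFileAndLine(y.Pos) {
        return x.Pos.Before(y.Pos) // then by source position
    }
    return x.ID < y.ID // finally by SSA value ID
}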
src/cmd/compile/internal/ssa/rewritePPC64.go
    v0.AuxInt = int64ToAuxInt(c)
    v0.AddArg(y)
    v.AddArg(v0)
    return true
}
// match: (CMP x y)
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMP y x))
for {
    x := v_0
    y := v_1
    if !(canonLessThan(x, y)) {
        break
    }
    v.reset(OpPPC64InvertFlags)
    v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
    v0.AddArg2(y, x)
    v.AddArg(v0)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 360.2K bytes - Viewed (0)
src/cmd/compile/internal/ssa/_gen/PPC64.rules
(CMPWU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPWUconst y [int32(c)]))

// Canonicalize the order of arguments to comparisons - helps with CSE.
((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))

// n is always a zero-extended uint16 value, so n & z is always a non-negative 32 or 64 bit value.
// Rewrite to a cmp int64(0) to lower into ANDCCconst in the latelower pass.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 53.2K bytes - Viewed (0)
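Note: the rule's comment states the motivation for canonLessThan: once CMP arguments always appear in one canonical order, comparisons that differ only in argument order (e.g. a < b written in one place and b > a in another) become the identical SSA value, so common-subexpression elimination can merge them; the InvertFlags wrapper compensates for the swap by flipping the condition under which the flags are later consumed. A toy value-numbering sketch of that effect (expr, canonicalize, and lookup are invented stand-ins, far simpler than the compiler's real structures):

package main

import "fmt"

// expr is a drastically simplified stand-in for an SSA value:
// an opcode plus the IDs of its two arguments.
type expr struct {
    op   string
    a, b int
}

// canonicalize mimics the rewrite rule: when the arguments of a CMP are
// "out of order", swap them and report that the flags are now inverted.
func canonicalize(e expr) (canon expr, inverted bool) {
    if e.op == "CMP" && e.a < e.b { // stand-in for canonLessThan(x, y)
        return expr{e.op, e.b, e.a}, true
    }
    return e, false
}

func main() {
    table := map[expr]int{} // value-numbering table: canonical expr -> value number
    next := 0
    lookup := func(e expr) int {
        c, _ := canonicalize(e)
        n, ok := table[c]
        if !ok {
            n = next
            next++
            table[c] = n
        }
        return n
    }
    // "CMP v1 v2" and "CMP v2 v1" arrive with swapped arguments...
    fmt.Println(lookup(expr{"CMP", 1, 2})) // 0
    // ...but both map to one table entry after canonicalization, so CSE merges them.
    fmt.Println(lookup(expr{"CMP", 2, 1})) // 0
}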
src/cmd/compile/internal/ssa/_gen/ARM64.rules
(ADDSflags x (MOVDconst [c])) => (ADDSconstflags [c] x)
(ADDconst [c] y) && c < 0 => (SUBconst [-c] y)

// Canonicalize the order of arguments to comparisons - helps with CSE.
((CMP|CMPW) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW) y x))

// mul-neg => mneg
(NEG (MUL x y)) => (MNEG x y)
(NEG (MULW x y)) && v.Type.Size() <= 4 => (MNEGW x y)
(MUL (NEG x) y) => (MNEG x y)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 15:49:20 UTC 2024 - 113.1K bytes - Viewed (0)
src/cmd/compile/internal/ssa/rewriteARM64.go
    v0.AuxInt = int64ToAuxInt(c)
    v0.AddArg(x)
    v.AddArg(v0)
    return true
}
// match: (CMP x y)
// cond: canonLessThan(x,y)
// result: (InvertFlags (CMP y x))
for {
    x := v_0
    y := v_1
    if !(canonLessThan(x, y)) {
        break
    }
    v.reset(OpARM64InvertFlags)
    v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
    v0.AddArg2(y, x)
    v.AddArg(v0)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 15:49:20 UTC 2024 - 608.6K bytes - Viewed (0)