- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 7 of 7 for ADDQconst (0.19 sec)
-
src/cmd/compile/internal/ssa/_gen/AMD64.rules
(SUBQconst [c] x) && c != -(1<<31) => (ADDQconst [-c] x) (SUBLconst [c] x) => (ADDLconst [-c] x) // generic constant folding // TODO: more of this (ADDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)+d]) (ADDLconst [c] (MOVLconst [d])) => (MOVLconst [c+d]) (ADDQconst [c] (ADDQconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDQconst [c+d] x) (ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 12 19:38:41 UTC 2024 - 93.9K bytes - Viewed (0) -
test/fixedbugs/issue21655.go
// license that can be found in the LICENSE file. // Make sure assembly offsets don't get too large. // To trigger issue21655, the index offset needs to be small // enough to fit into an int32 (to get rewritten to an ADDQconst) // but large enough to overflow an int32 after multiplying by the stride. package main func f1(a []int64, i int64) int64 { return a[i+1<<30] } func f2(a []int32, i int64) int32 { return a[i+1<<30] }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Dec 08 03:53:18 UTC 2017 - 1.5K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/rewriteAMD64.go
break } d := auxIntToInt64(v_0.AuxInt) v.reset(OpAMD64MOVQconst) v.AuxInt = int64ToAuxInt(int64(c) + d) return true } // match: (ADDQconst [c] (ADDQconst [d] x)) // cond: is32Bit(int64(c)+int64(d)) // result: (ADDQconst [c+d] x) for { c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64ADDQconst { break } d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0]
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 12 19:38:41 UTC 2024 - 712.7K bytes - Viewed (0) -
test/codegen/comparisons.go
// amd64:-`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]` // amd64:`CALL` return s1 == s2 } // -------------- // // Ordering // // -------------- // // Test that LEAQ/ADDQconst are folded into SETx ops var r bool func CmpFold(x uint32) { // amd64:`SETHI\t.*\(SB\)` r = x > 4 } // Test that direct comparisons with memory are generated when // possible
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 19 16:31:02 UTC 2024 - 15.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
{name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true, clobberFlags: true}, {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int32", typ: "UInt64", clobberFlags: true}, {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", clobberFlags: true},
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Aug 04 16:40:24 UTC 2023 - 98K bytes - Viewed (1) -
src/cmd/compile/internal/ssa/regalloc.go
} // Set desired register of input 0 if this is a 2-operand instruction. if opcodeTable[v.Op].resultInArg0 || v.Op == OpAMD64ADDQconst || v.Op == OpAMD64ADDLconst || v.Op == OpSelect0 { // ADDQconst is added here because we want to treat it as resultInArg0 for // the purposes of desired registers, even though it is not an absolute requirement. // This is because we'd rather implement it as ADDQ instead of LEAQ.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 21 17:49:56 UTC 2023 - 87.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/opGen.go
}, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 }, }, }, { name: "ADDQconst", auxType: auxInt32, argLen: 1, clobberFlags: true, asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 15:49:20 UTC 2024 - 1M bytes - Viewed (0)