- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 15 for cmovz (0.15 sec)
-
src/cmd/internal/obj/mips/anames.go
"ADDF", "ADDU", "ADDW", "AND", "BEQ", "BFPF", "BFPT", "BGEZ", "BGEZAL", "BGTZ", "BLEZ", "BLTZ", "BLTZAL", "BNE", "BREAK", "CLO", "CLZ", "CMOVF", "CMOVN", "CMOVT", "CMOVZ", "CMPEQD", "CMPEQF", "CMPGED", "CMPGEF", "CMPGTD", "CMPGTF", "DIV", "DIVD", "DIVF", "DIVU", "DIVW", "GOK", "LL", "LLV", "LUI", "MADD",
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Aug 08 12:17:12 UTC 2023 - 1.4K bytes - Viewed (0) -
src/cmd/asm/internal/asm/testdata/mips.s
// } NOP $4 // // special // SYSCALL BREAK SYNC // // conditional move on zero/nonzero gp value // CMOVN R1, R2, R3 CMOVZ R1, R2, R3 // // conditional move on fp false/true // CMOVF R1, R2 CMOVT R1, R2 // // conditional traps // TEQ $1, R1, R2 TEQ $1, R1 // // other // CLO R1, R2 SQRTD F0, F1
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Aug 08 12:17:12 UTC 2023 - 6.7K bytes - Viewed (0) -
test/codegen/condmove.go
return a } // Floating point CMOVs are not supported by amd64/arm64/ppc64x func cmovfloatmove(x, y int) float64 { a := 1.0 if x <= y { a = 2.0 } // amd64:-"CMOV" // arm64:-"CSEL" // ppc64x:-"ISEL" // wasm:-"Select" return a } // On amd64, the following patterns trigger comparison inversion. // Test that we correctly invert the CMOV condition var gsink int64 var gusink uint64
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Oct 06 20:57:33 UTC 2023 - 6.2K bytes - Viewed (0) -
test/codegen/spectre.go
package codegen func IndexArray(x *[10]int, i int) int { // amd64:`CMOVQCC` return x[i] } func IndexString(x string, i int) byte { // amd64:`CMOVQ(LS|CC)` return x[i] } func IndexSlice(x []float64, i int) float64 { // amd64:`CMOVQ(LS|CC)` return x[i] } func SliceArray(x *[10]int, i, j int) []int { // amd64:`CMOVQHI` return x[i:j] } func SliceString(x string, i, j int) string {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:25 UTC 2023 - 734 bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/AMD64.rules
=> (CMOVWNE y x (CMPQconst [0] check)) // Absorb InvertFlags (CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond)) => (CMOVQ(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond) (CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond)) => (CMOVL(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond) (CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond)) => (CMOVW(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 12 19:38:41 UTC 2024 - 93.9K bytes - Viewed (0) -
src/cmd/internal/obj/arm64/a.out.go
C_BITCON // bitfield and logical immediate masks C_ADDCON2 // 24-bit constant C_LCON // 32-bit constant C_MOVCON2 // a constant that can be loaded with one MOVZ/MOVN and one MOVK C_MOVCON3 // a constant that can be loaded with one MOVZ/MOVN and two MOVKs C_VCON // 64-bit constant C_FCON // floating-point constant C_VCONADDR // 64-bit memory address C_AACON // ADDCON offset in auto constant $a(FP)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Oct 18 17:56:30 UTC 2023 - 18.1K bytes - Viewed (0) -
src/cmd/internal/obj/arm64/doc.go
VMOVQ $0x1122334455667788, $0x99aabbccddeeff00, V2 // V2=0x99aabbccddeeff001122334455667788 8. Move an optionally-shifted 16-bit immediate value to a register. The instructions are MOVK(W), MOVZ(W) and MOVN(W), the assembly syntax is "op $(uimm16<<shift), <Rd>". The <uimm16> is the 16-bit unsigned immediate, in the range 0 to 65535; For the 32-bit variant, the <shift> is 0 or 16, for the
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 07 00:21:42 UTC 2023 - 9.6K bytes - Viewed (0) -
src/cmd/internal/obj/mips/a.out.go
AADDF AADDU AADDW AAND ABEQ ABFPF ABFPT ABGEZ ABGEZAL ABGTZ ABLEZ ABLTZ ABLTZAL ABNE ABREAK ACLO ACLZ ACMOVF ACMOVN ACMOVT ACMOVZ ACMPEQD ACMPEQF ACMPGED ACMPGEF ACMPGTD ACMPGTF ADIV ADIVD ADIVF ADIVU ADIVW AGOK ALL ALLV ALUI AMADD AMOVB AMOVBU AMOVD
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Aug 08 12:17:12 UTC 2023 - 7.6K bytes - Viewed (0) -
src/cmd/internal/obj/arm64/asm7.go
rel.Off = int32(c.pc) rel.Siz = 8 rel.Sym = p.From.Sym rel.Add = p.From.Offset rel.Type = objabi.R_ADDRARM64 case 69: /* LE model movd $tlsvar, reg -> movz reg, 0 + reloc */ o1 = c.opirr(p, AMOVZ) o1 |= uint32(p.To.Reg & 31) rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc) rel.Siz = 4 rel.Sym = p.From.Sym rel.Type = objabi.R_ARM64_TLS_LE
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 15:44:14 UTC 2024 - 201.1K bytes - Viewed (0) -
src/cmd/link/internal/amd64/asm.go
writeableData[r.Off()-2] = 0x8d su.SetRelocType(rIdx, objabi.R_PCREL) su.SetRelocAdd(rIdx, r.Add()+4) return true } } // fall back to using GOT and hope for the best (CMOV*) // TODO: just needs relocation, no need to put in .dynsym ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_X86_64_GLOB_DAT)) su.SetRelocType(rIdx, objabi.R_PCREL) su.SetRelocSym(rIdx, syms.GOT)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Aug 23 05:58:20 UTC 2023 - 21K bytes - Viewed (0)