- Sort Score
- Results per page: 10
- Languages All
Results 71 - 80 of 95 for vmov (0.04 sec)
-
test/codegen/README
introduced, for example, by the compiler's optimization passes. Any given line of Go code could get assigned more instructions than it may appear from reading the source. In particular, matching all MOV instructions should be avoided; the compiler may add them for unrelated reasons and this may render the test ineffective. -- Line matching logic Regexps are always matched from the start of the instructions line.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jul 18 19:55:29 UTC 2023 - 5.2K bytes - Viewed (0) -
src/cmd/asm/internal/asm/testdata/amd64error.s
CALL (AX)(PC*1) // ERROR "invalid instruction" CALL (AX)(SB*1) // ERROR "invalid instruction" CALL (AX)(FP*1) // ERROR "invalid instruction" // Forbid memory operands for MOV CR/DR. See #24981. MOVQ CR0, (AX) // ERROR "invalid instruction" MOVQ CR2, (AX) // ERROR "invalid instruction" MOVQ CR3, (AX) // ERROR "invalid instruction"
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Jun 14 00:03:57 UTC 2023 - 8.9K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/arg.go
// its least significant bit [Yes|No] inverted, e.g. // arg_cond_AllowALNV_Normal // arg_cond_NotAllowALNV_Invert // // - arg_immediate_OptLSL_amount_16_0_48: // An immediate for MOV[KNZ] instruction encoded in imm16[20:5] with an optional // left shift of 16 in the range [0, 48] encoded in hw[22, 21] //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Aug 16 17:57:48 UTC 2017 - 20K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/arch/x86/x86asm/plan9x.go
CVTTSS2SI: true, DEC: true, DIV: true, FLDENV: true, FRSTOR: true, IDIV: true, IMUL: true, IN: true, INC: true, LEA: true, MOV: true, MOVNTI: true, MUL: true, NEG: true, NOP: true, NOT: true, OR: true, OUT: true, POP: true, POPA: true, POPCNT: true,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Jul 12 20:38:21 UTC 2023 - 7.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/MIPSOps.go
// SYNC // MOV(B|W) (Rarg0), Rout // SYNC {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true}, {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true}, // store arg1 to arg0. arg2=mem. returns memory. // SYNC // MOV(B|W) Rarg1, (Rarg0) // SYNC
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 24 14:43:03 UTC 2023 - 24K bytes - Viewed (0) -
src/cmd/internal/obj/arm64/a.out.go
AVBIT AVBSL AVCMEQ AVCMTST AVCNT AVDUP AVEOR AVEOR3 AVEXT AVFMLA AVFMLS AVLD1 AVLD1R AVLD2 AVLD2R AVLD3 AVLD3R AVLD4 AVLD4R AVMOV AVMOVD AVMOVI AVMOVQ AVMOVS AVORR AVPMULL AVPMULL2 AVRAX1 AVRBIT AVREV16 AVREV32 AVREV64 AVSHL AVSLI AVSRI AVST1 AVST2 AVST3 AVST4
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Oct 18 17:56:30 UTC 2023 - 18.1K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/plan9.go
case LIS: return "ADDIS $0," + args[1] + "," + args[0] // store instructions with index registers case STBX, STBUX, STHX, STHUX, STWX, STWUX, STDX, STDUX, STHBRX, STWBRX, STDBRX, STSWX, STFIWX: return "MOV" + op[2:len(op)-1] + " " + args[0] + ",(" + args[2] + ")(" + args[1] + ")" case STDCXCC, STWCXCC, STHCXCC, STBCXCC: return op + " " + args[0] + ",(" + args[2] + ")(" + args[1] + ")"
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 22 17:16:14 UTC 2022 - 10.9K bytes - Viewed (0) -
src/cmd/link/internal/x86/asm.go
thunkfunc.SetType(sym.STEXT) ldr.SetAttrLocal(thunkfunc.Sym(), true) o := func(op ...uint8) { for _, op1 := range op { thunkfunc.AddUint8(op1) } } // 8b 04 24 mov (%esp),%eax // Destination register is in bits 3-5 of the middle byte, so add that in. o(0x8b, 0x04+r.num<<3, 0x24) // c3 ret o(0xc3) thunks = append(thunks, thunkfunc.Sym()) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Aug 23 05:58:20 UTC 2023 - 13.6K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
{name: "CMOVWCS", argLength: 3, reg: gp21, asm: "CMOVWCS", resultInArg0: true}, // CMOV with floating point instructions. We need separate pseudo-op to handle // InvertFlags correctly, and to generate special code that handles NaN (unordered flag). // NOTE: the fact that CMOV*EQF here is marked to generate CMOV*NE is not a bug. See // code generation in amd64/ssa.go.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Aug 04 16:40:24 UTC 2023 - 98K bytes - Viewed (1) -
src/cmd/link/internal/amd64/asm.go
writeableData[r.Off()-2] = 0x8d su.SetRelocType(rIdx, objabi.R_PCREL) su.SetRelocAdd(rIdx, r.Add()+4) return true } } // fall back to using GOT and hope for the best (CMOV*) // TODO: just needs relocation, no need to put in .dynsym ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_X86_64_GLOB_DAT)) su.SetRelocType(rIdx, objabi.R_PCREL) su.SetRelocSym(rIdx, syms.GOT)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Aug 23 05:58:20 UTC 2023 - 21K bytes - Viewed (0)