- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 58 for MOVW (0.09 sec)
-
src/internal/runtime/syscall/asm_linux_mipsx.s
TEXT ·Syscall6(SB),NOSPLIT,$20-40 MOVW num+0(FP), R2 // syscall entry MOVW a1+4(FP), R4 MOVW a2+8(FP), R5 MOVW a3+12(FP), R6 MOVW a4+16(FP), R7 MOVW a5+20(FP), R8 MOVW a6+24(FP), R9 MOVW R8, 16(R29) MOVW R9, 20(R29) MOVW R0, R3 // reset R3 to 0 as 1-ret SYSCALL keeps it SYSCALL BEQ R7, ok MOVW $-1, R1 MOVW R1, r1+28(FP) MOVW R0, r2+32(FP) MOVW R2, errno+36(FP) RET ok:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Feb 21 21:28:32 UTC 2024 - 876 bytes - Viewed (0) -
src/internal/runtime/syscall/asm_linux_arm.s
TEXT ·Syscall6(SB),NOSPLIT,$0-40 MOVW num+0(FP), R7 // syscall entry MOVW a1+4(FP), R0 MOVW a2+8(FP), R1 MOVW a3+12(FP), R2 MOVW a4+16(FP), R3 MOVW a5+20(FP), R4 MOVW a6+24(FP), R5 SWI $0 MOVW $0xfffff001, R6 CMP R6, R0 BLS ok MOVW $-1, R1 MOVW R1, r1+28(FP) MOVW $0, R2 MOVW R2, r2+32(FP) RSB $0, R0, R0 MOVW R0, errno+36(FP) RET ok: MOVW R0, r1+28(FP) MOVW R1, r2+32(FP)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Feb 21 21:28:32 UTC 2024 - 696 bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_mipsx.s
// return 0; TEXT ·Cas(SB),NOSPLIT,$0-13 MOVW ptr+0(FP), R1 MOVW old+4(FP), R2 MOVW new+8(FP), R5 SYNC try_cas: MOVW R5, R3 LL (R1), R4 // R4 = *R1 BNE R2, R4, cas_fail SC R3, (R1) // *R1 = R3 BEQ R3, try_cas SYNC MOVB R3, ret+12(FP) RET cas_fail: SYNC MOVB R0, ret+12(FP) RET TEXT ·Store(SB),NOSPLIT,$0-8 MOVW ptr+0(FP), R1 MOVW val+4(FP), R2 SYNC MOVW R2, 0(R1) SYNC
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 11 21:29:34 UTC 2024 - 4.9K bytes - Viewed (0) -
src/runtime/sys_linux_ppc64x.s
MOVD name+0(FP), R3 MOVW mode+8(FP), R4 MOVW perm+12(FP), R5 SYSCALL $SYS_open BVC 2(PC) MOVW $-1, R3 MOVW R3, ret+16(FP) RET TEXT runtime·closefd(SB),NOSPLIT|NOFRAME,$0-12 MOVW fd+0(FP), R3 SYSCALL $SYS_close BVC 2(PC) MOVW $-1, R3 MOVW R3, ret+8(FP) RET TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0-28 MOVD fd+0(FP), R3 MOVD p+8(FP), R4 MOVW n+16(FP), R5 SYSCALL $SYS_write BVC 2(PC)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 18:17:17 UTC 2024 - 18.1K bytes - Viewed (0) -
src/crypto/sha1/sha1block_arm.s
ORR Rt2<<16, Rt0, Rt0 ; \ ORR Rt1<<24, Rt0, Rt0 ; \ MOVW.P Rt0, 4(Rw) ; \ ADD Rt0, Re, Re // tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf] // w[i&0xf] = tmp<<1 | tmp>>(32-1) // e += w[i&0xf] #define SHUFFLE(Re) \ MOVW (-16*4)(Rw), Rt0 ; \ MOVW (-14*4)(Rw), Rt1 ; \ MOVW (-8*4)(Rw), Rt2 ; \ EOR Rt0, Rt1, Rt0 ; \ MOVW (-3*4)(Rw), Rt1 ; \ EOR Rt2, Rt0, Rt0 ; \ EOR Rt0, Rt1, Rt0 ; \
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 5.6K bytes - Viewed (0) -
src/runtime/asm_arm.s
MOVW $0, R11 MOVW R11, (g_sched+gobuf_lr)(g) // Switch to m->g0 & its stack, call fn. MOVW g, R1 MOVW g_m(g), R8 MOVW m_g0(R8), R0 BL setg<>(SB) CMP g, R1 B.NE 2(PC) B runtime·badmcall(SB) MOVW fn+0(FP), R0 MOVW (g_sched+gobuf_sp)(g), R13 SUB $8, R13 MOVW R1, 4(R13) MOVW R0, R7 MOVW 0(R0), R0 BL (R0) B runtime·badmcall2(SB) RET
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Feb 23 21:00:52 UTC 2024 - 32.1K bytes - Viewed (0) -
src/runtime/asm_mipsx.s
MOVW R28, 96(R29) // R29 is SP. // R30 is g. // R31 is LR, which was saved by the prologue. CALL runtime·wbBufFlush(SB) MOVW 4(R29), R20 MOVW 8(R29), R21 MOVW 12(R29), R3 MOVW 16(R29), R4 MOVW 20(R29), R5 MOVW 24(R29), R6 MOVW 28(R29), R7 MOVW 32(R29), R8 MOVW 36(R29), R9 MOVW 40(R29), R10 MOVW 44(R29), R11 MOVW 48(R29), R12 MOVW 52(R29), R13 MOVW 56(R29), R14
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 06 11:46:29 UTC 2024 - 26.3K bytes - Viewed (0) -
test/codegen/memcombine.go
// amd64:"MOVQ",-"MOVL",-"SHRQ" // arm64:"MOVD",-"MOVW",-"LSR" // ppc64le:"MOVD",-"MOVW",-"SRD" p.a = uint32(x) // amd64:-"MOVL",-"SHRQ" // arm64:-"MOVW",-"LSR" // ppc64le:-"MOVW",-"SRD" p.b = uint32(x >> 32) } func store32be(p *struct{ a, b uint32 }, x uint64) { // ppc64:"MOVD",-"MOVW",-"SRD" // s390x:"MOVD",-"MOVW",-"SRD" p.a = uint32(x >> 32) // ppc64:-"MOVW",-"SRD" // s390x:-"MOVW",-"SRD"
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Mar 21 19:45:41 UTC 2024 - 29.7K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_ppc64x.s
TEXT ·Or32(SB), NOSPLIT, $0-20 MOVD ptr+0(FP), R3 MOVW val+8(FP), R4 LWSYNC again: LWAR (R3), R6 OR R4, R6, R7 STWCCC R7, (R3) BNE again MOVW R6, ret+16(FP) RET // func And32(addr *uint32, v uint32) old uint32 TEXT ·And32(SB), NOSPLIT, $0-20 MOVD ptr+0(FP), R3 MOVW val+8(FP), R4 LWSYNC again: LWAR (R3),R6 AND R4, R6, R7 STWCCC R7, (R3)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 7.5K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_riscv64.s
TEXT ·And(SB), NOSPLIT, $0-12 MOV ptr+0(FP), A0 MOVW val+8(FP), A1 AMOANDW A1, (A0), ZERO RET // func Or(ptr *uint32, val uint32) TEXT ·Or(SB), NOSPLIT, $0-12 MOV ptr+0(FP), A0 MOVW val+8(FP), A1 AMOORW A1, (A0), ZERO RET // func Or32(ptr *uint32, val uint32) uint32 TEXT ·Or32(SB), NOSPLIT, $0-20 MOV ptr+0(FP), A0 MOVW val+8(FP), A1 AMOORW A1, (A0), A2 MOVW A2, ret+16(FP)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 7K bytes - Viewed (0)