- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 19 for NEGQ (0.06 sec)
-
test/codegen/mathbits.go
// arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP" // amd64:"NEGL","ADCQ","SBBQ","NEGQ" // ppc64x: "ADDC", "ADDE", "ADDZE" // s390x:"ADDE","ADDC\t[$]-1," // riscv64: "ADD","SLTU" return bits.Add(x, y, ci) } func AddC(x, ci uint) (r, co uint) { // arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP" // amd64:"NEGL","ADCQ","SBBQ","NEGQ" // loong64: "ADDV", "SGTU" // ppc64x: "ADDC", "ADDE", "ADDZE"
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 18:51:17 UTC 2024 - 19.6K bytes - Viewed (0) -
src/runtime/sys_freebsd_amd64.s
MOVL val+12(FP), DX MOVQ uaddr1+16(FP), R10 MOVQ ut+24(FP), R8 MOVL $SYS__umtx_op, AX SYSCALL JCC 2(PC) NEGQ AX MOVL AX, ret+32(FP) RET TEXT runtime·thr_new(SB),NOSPLIT,$0 MOVQ param+0(FP), DI MOVL size+8(FP), SI MOVL $SYS_thr_new, AX SYSCALL JCC 2(PC) NEGQ AX MOVL AX, ret+16(FP) RET TEXT runtime·thr_start(SB),NOSPLIT,$0 MOVQ DI, R13 // m
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 06 18:49:01 UTC 2023 - 12.7K bytes - Viewed (0) -
src/cmd/asm/internal/asm/testdata/amd64.s
// instructions for each rule, to guarantee we cover the same space. #include "../../../../../runtime/textflag.h" TEXT foo(SB), DUPOK|NOSPLIT, $0 // LTYPE1 nonrem { outcode($1, &$2); } NEGQ R11 NEGQ 4(R11) NEGQ foo+4(SB) // LTYPE2 rimnon { outcode($1, &$2); } INT $4 DIVB R11 DIVB 4(R11) DIVB foo+4(SB) // LTYPE3 rimrem { outcode($1, &$2); } SUBQ $4, DI SUBQ R11, DI SUBQ 4(R11), DI
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 09 18:57:21 UTC 2019 - 3.3K bytes - Viewed (0) -
src/runtime/sys_netbsd_amd64.s
MOVQ ndst+40(FP), R9 // arg 6 - newlen MOVQ $SYS___sysctl, AX SYSCALL JCC 4(PC) NEGQ AX MOVL AX, ret+48(FP) RET MOVL $0, AX MOVL AX, ret+48(FP) RET // int32 runtime·kqueue(void) TEXT runtime·kqueue(SB),NOSPLIT,$0 MOVQ $0, DI MOVL $SYS_kqueue, AX SYSCALL JCC 2(PC) NEGQ AX MOVL AX, ret+0(FP) RET
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 06 18:49:01 UTC 2023 - 9.8K bytes - Viewed (0) -
src/syscall/asm_linux_amd64.s
MOVQ $0, R10 MOVQ $0, R8 MOVQ $0, R9 MOVQ trap+0(FP), AX // syscall entry POPQ R12 // preserve return address SYSCALL PUSHQ R12 CMPQ AX, $0xfffffffffffff001 JLS ok2 MOVQ $-1, r1+32(FP) NEGQ AX MOVQ AX, err+40(FP) RET ok2: MOVQ AX, r1+32(FP) MOVQ $0, err+40(FP) RET // func rawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Sep 07 19:11:15 UTC 2023 - 1.3K bytes - Viewed (0) -
src/math/big/arith_amd64.s
ADDQ CX, CX // restore CF MOVQ 0(R8)(SI*8), R11 ADCQ 0(R9)(SI*8), R11 MOVQ R11, 0(R10)(SI*8) SBBQ CX, CX // save CF ADDQ $1, SI // i++ SUBQ $1, DI // n-- JG L1 // if n > 0 goto L1 E1: NEGQ CX MOVQ CX, c+72(FP) // return c RET // func subVV(z, x, y []Word) (c Word) // (same as addVV except for SBBQ instead of ADCQ and label names) TEXT ·subVV(SB),NOSPLIT,$0 MOVQ z_len+8(FP), DI
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:27 UTC 2023 - 9.1K bytes - Viewed (0) -
src/runtime/sys_dragonfly_amd64.s
MOVL timeout+12(FP), DX // arg 3 - timeout MOVL $469, AX // umtx_sleep SYSCALL JCC 2(PC) NEGQ AX MOVL AX, ret+16(FP) RET TEXT runtime·sys_umtx_wakeup(SB),NOSPLIT,$0 MOVQ addr+0(FP), DI // arg 1 - ptr MOVL val+8(FP), SI // arg 2 - count MOVL $470, AX // umtx_wakeup SYSCALL JCC 2(PC) NEGQ AX MOVL AX, ret+16(FP) RET TEXT runtime·lwp_create(SB),NOSPLIT,$0
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 06 18:49:01 UTC 2023 - 8.3K bytes - Viewed (0) -
src/internal/runtime/syscall/asm_linux_amd64.s
// a6 already in R9. // a5 already in R8. MOVQ SI, R10 // a4 MOVQ DI, DX // a3 MOVQ CX, SI // a2 MOVQ BX, DI // a1 // num already in AX. SYSCALL CMPQ AX, $0xfffffffffffff001 JLS ok NEGQ AX MOVQ AX, CX // errno MOVQ $-1, AX // r1 MOVQ $0, BX // r2 RET ok: // r1 already in AX. MOVQ DX, BX // r2 MOVQ $0, CX // errno
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Feb 21 21:28:32 UTC 2024 - 1.1K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/AMD64.rules
((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y)) ((SHLQ|SHRQ|SARQ) x (ANDQconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y) ((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y)) ((SHLL|SHRL|SARL) x (ADDQconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 12 19:38:41 UTC 2024 - 93.9K bytes - Viewed (0) -
src/internal/bytealg/equal_amd64.s
RET // remaining 0-8 bytes leftover: MOVQ -8(SI)(BX*1), CX MOVQ -8(DI)(BX*1), DX CMPQ CX, DX SETEQ AX RET small: CMPQ BX, $0 JEQ equal LEAQ 0(BX*8), CX NEGQ CX CMPB SI, $0xf8 JA si_high // load at SI won't cross a page boundary. MOVQ (SI), SI JMP si_finish si_high: // address ends in 11111xxx. Load up to bytes we want, move to correct position.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Nov 17 16:34:40 UTC 2023 - 2.8K bytes - Viewed (0)