- Sort by: Score
- Results per page: 10
- Languages All
Results 1 - 10 of 16 for ADDL (0.06 sec)
-
src/crypto/md5/md5block_amd64.s
MOVL d, R9; \ XORL c, R9; \ XORL b, R9; \ ADDL $const, a; \ ADDL R8, a; \ MOVL (index*4)(SI),R8; \ ADDL R9, a; \ ROLL $shift, a; \ ADDL b, a #define ROUND3(a, b, c, d, index, const, shift) \ XORL a, R9; \ XORL b, R9; \ ADDL $const, a; \ ADDL R8, a; \ MOVL (index*4)(SI),R8; \ ADDL R9, a; \ ROLL $shift, a; \ ADDL b, a ROUND3FIRST(AX,BX,CX,DX, 8,0xfffa3942, 4);
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 4.8K bytes - Viewed (0) -
src/crypto/sha256/sha256block_386.s
ADDL (4*4)(DI), AX MOVL AX, (4*4)(DI) MOVL AX, (4*4)(BP) MOVL (5*4)(BP), BX // H5 = f + H5 ADDL (5*4)(DI), BX MOVL BX, (5*4)(DI) MOVL BX, (5*4)(BP) MOVL (6*4)(BP), CX // H6 = g + H6 ADDL (6*4)(DI), CX MOVL CX, (6*4)(DI) MOVL CX, (6*4)(BP) MOVL (7*4)(BP), DX // H7 = h + H7 ADDL (7*4)(DI), DX MOVL DX, (7*4)(DI) MOVL DX, (7*4)(BP) ADDL $64, SI CMPL SI, 288(SP)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 8.2K bytes - Viewed (0) -
src/crypto/sha256/sha256block_amd64.s
MOVQ dig+0(FP), BP ADDL (0*4)(BP), R8 // H0 = a + H0 MOVL R8, (0*4)(BP) ADDL (1*4)(BP), R9 // H1 = b + H1 MOVL R9, (1*4)(BP) ADDL (2*4)(BP), R10 // H2 = c + H2 MOVL R10, (2*4)(BP) ADDL (3*4)(BP), R11 // H3 = d + H3 MOVL R11, (3*4)(BP) ADDL (4*4)(BP), R12 // H4 = e + H4 MOVL R12, (4*4)(BP) ADDL (5*4)(BP), R13 // H5 = f + H5 MOVL R13, (5*4)(BP)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 47.3K bytes - Viewed (0) -
src/crypto/md5/md5block_386.s
ROUND4(AX,BX,CX,DX,11,0xf7537e82, 6); ROUND4(DX,AX,BX,CX, 2,0xbd3af235,10); ROUND4(CX,DX,AX,BX, 9,0x2ad7d2bb,15); ROUND4(BX,CX,DX,AX, 0,0xeb86d391,21); ADDL 0(SP), AX ADDL 4(SP), BX ADDL 8(SP), CX ADDL 12(SP), DX ADDL $64, SI CMPL SI, 16(SP) JB loop end: MOVL dig+0(FP), BP MOVL AX, (0*4)(BP) MOVL BX, (1*4)(BP) MOVL CX, (2*4)(BP) MOVL DX, (3*4)(BP)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 4.5K bytes - Viewed (0) -
src/crypto/sha1/sha1block_386.s
ROUND4(BP, AX, BX, CX, DX, 76) ROUND4(DX, BP, AX, BX, CX, 77) ROUND4(CX, DX, BP, AX, BX, 78) ROUND4(BX, CX, DX, BP, AX, 79) ADDL 64(SP), AX ADDL 68(SP), BX ADDL 72(SP), CX ADDL 76(SP), DX ADDL 80(SP), BP MOVL 88(SP), SI ADDL $64, SI CMPL SI, 84(SP) JB loop end: MOVL dig+0(FP), DI MOVL AX, (0*4)(DI) MOVL BX, (1*4)(DI) MOVL CX, (2*4)(DI)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 6K bytes - Viewed (0) -
src/crypto/sha1/sha1block_amd64.s
ROUND4(AX, BX, CX, DX, BP, 75) ROUND4(BP, AX, BX, CX, DX, 76) ROUND4(DX, BP, AX, BX, CX, 77) ROUND4(CX, DX, BP, AX, BX, 78) ROUND4(BX, CX, DX, BP, AX, 79) ADDL R11, AX ADDL R12, BX ADDL R13, CX ADDL R14, DX ADDL R15, BP ADDQ $64, SI CMPQ SI, DI JB loop end: MOVQ dig+0(FP), DI MOVL AX, (0*4)(DI) MOVL BX, (1*4)(DI) MOVL CX, (2*4)(DI) MOVL DX, (3*4)(DI)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 31.5K bytes - Viewed (0) -
test/codegen/arithmetic.go
// amd64:`SUBQ\s[A-Z]+,\s24\([A-Z]+\)` arr[3] -= b // 386:`DECL\s16\([A-Z]+\)` arr[4]-- // 386:`ADDL\s[$]-20,\s20\([A-Z]+\)` arr[5] -= 20 // 386:`SUBL\s\([A-Z]+\)\([A-Z]+\*4\),\s[A-Z]+` ef -= arr[b] // 386:`SUBL\s[A-Z]+,\s\([A-Z]+\)\([A-Z]+\*4\)` arr[c] -= b // 386:`ADDL\s[$]-15,\s\([A-Z]+\)\([A-Z]+\*4\)` arr[d] -= 15 // 386:`DECL\s\([A-Z]+\)\([A-Z]+\*4\)` arr[b]-- // amd64:`DECQ\s64\([A-Z]+\)`
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 15:28:00 UTC 2024 - 15.2K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_386.s
// Atomically: // *val += delta; // return *val; TEXT ·Xadd(SB), NOSPLIT, $0-12 MOVL ptr+0(FP), BX MOVL delta+4(FP), AX MOVL AX, CX LOCK XADDL AX, 0(BX) ADDL CX, AX MOVL AX, ret+8(FP) RET TEXT ·Xadd64(SB), NOSPLIT, $0-20 NO_LOCAL_POINTERS // no XADDQ so use CMPXCHG8B loop MOVL ptr+0(FP), BP TESTL $7, BP JZ 2(PC) CALL ·panicUnaligned(SB)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 6.5K bytes - Viewed (0) -
src/runtime/asm_386.s
/* copy return values back */ \ MOVL stackArgsType+0(FP), DX; \ MOVL stackArgs+8(FP), DI; \ MOVL stackArgsSize+12(FP), CX; \ MOVL stackRetOffset+16(FP), BX; \ MOVL SP, SI; \ ADDL BX, DI; \ ADDL BX, SI; \ SUBL BX, CX; \ CALL callRet<>(SB); \ RET // callRet copies return values back at the end of call*. This is a // separate function so it can allocate stack space for the arguments
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 15 15:45:13 UTC 2024 - 43.1K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_amd64.s
// uint32 Xadd(uint32 volatile *val, int32 delta) // Atomically: // *val += delta; // return *val; TEXT ·Xadd(SB), NOSPLIT, $0-20 MOVQ ptr+0(FP), BX MOVL delta+8(FP), AX MOVL AX, CX LOCK XADDL AX, 0(BX) ADDL CX, AX MOVL AX, ret+16(FP) RET // uint64 Xadd64(uint64 volatile *val, int64 delta) // Atomically: // *val += delta; // return *val; TEXT ·Xadd64(SB), NOSPLIT, $0-24 MOVQ ptr+0(FP), BX
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 5.2K bytes - Viewed (0)