- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 233 for r6 (0.14 sec)
-
src/runtime/memmove_s390x.s
MOVH R7, 4(R6) MOVB R8, 6(R6) RET move8to11: CMPBNE R5, $8, move9 MOVD 0(R4), R3 MOVD R3, 0(R6) RET move9: CMPBNE R5, $9, move10 MOVD 0(R4), R3 MOVB 8(R4), R7 MOVD R3, 0(R6) MOVB R7, 8(R6) RET move10: CMPBNE R5, $10, move11 MOVD 0(R4), R3 MOVH 8(R4), R7 MOVD R3, 0(R6) MOVH R7, 8(R6) RET move11: MOVD 0(R4), R3 MOVH 8(R4), R7
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Jan 22 18:54:48 UTC 2020 - 2.9K bytes - Viewed (0) -
src/internal/bytealg/compare_mipsx.s
ADDU R3, R8 // R3 is current byte in a, R8 is last byte in a to compare loop: BEQ R3, R8, samebytes MOVBU (R3), R6 ADDU $1, R3 MOVBU (R4), R7 ADDU $1, R4 BEQ R6, R7 , loop SGTU R6, R7, R8 MOVW $-1, R6 CMOVZ R8, R6, R8 JMP cmp_ret samebytes: SGTU R1, R2, R6 SGTU R2, R1, R7 SUBU R7, R6, R8 cmp_ret: MOVW R8, ret+24(FP) RET TEXT runtime·cmpstring(SB),NOSPLIT,$0-20 MOVW a_base+0(FP), R3
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat Nov 06 10:24:44 UTC 2021 - 1.4K bytes - Viewed (0) -
src/internal/bytealg/compare_loong64.s
// make sure both a and b are aligned. OR R4, R6, R15 AND $7, R15 BNE R0, R15, byte_loop PCALIGN $16 chunk16_loop: BEQ R0, R14, byte_loop MOVV (R4), R8 MOVV (R6), R9 BNE R8, R9, byte_loop MOVV 8(R4), R16 MOVV 8(R6), R17 ADDV $16, R4 ADDV $16, R6 SUBVU $1, R14 BEQ R16, R17, chunk16_loop SUBV $8, R4 SUBV $8, R6 byte_loop: BEQ R4, R12, samebytes MOVBU (R4), R8
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 13 15:04:25 UTC 2024 - 1.7K bytes - Viewed (0) -
src/internal/bytealg/compare_ppc64x.s
// incoming: // R3 a addr -> R5 // R4 a len -> R3 // R5 b addr -> R6 // R6 b len -> R4 // // on entry to cmpbody: // R3 compare value if compared length is same. // R5 a addr // R6 b addr // R9 min(len(a),len(b)) SETB_INIT() CMP R4,R6,CR0 CMP R3,R5,CR7 ISEL CR0LT,R4,R6,R9 MOVD R5,R6 MOVD R3,R5 SETB_CR0(R3) BC $12,30,LR // beqlr cr7 BR cmpbody<>(SB)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 28 17:33:20 UTC 2023 - 6.7K bytes - Viewed (0) -
src/runtime/memmove_mipsx.s
SGTU R1, R2, R6 BNE R6, backward // if less than 4 bytes, use byte by byte copying SGTU $4, R3, R6 BNE R6, f_small_copy // align destination to 4 bytes AND $3, R1, R6 BEQ R6, f_dest_aligned SUBU R1, R0, R6 AND $3, R6 MOVWHI 0(R2), R7 SUBU R6, R3 MOVWLO 3(R2), R7 ADDU R6, R2 MOVWHI R7, 0(R1) ADDU R6, R1 f_dest_aligned: AND $31, R3, R7
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat Nov 06 10:24:44 UTC 2021 - 4.4K bytes - Viewed (0) -
src/internal/bytealg/compare_arm64.s
CMP R4, R5 BNE cmp CMP R8, R9 BNE cmpnext CMP R10, R0 BNE chunk16_loop AND $0xf, R6, R6 CBZ R6, samebytes SUBS $8, R6 BLT tail // the length of tail > 8 bytes MOVD.P 8(R0), R4 MOVD.P 8(R2), R5 CMP R4, R5 BNE cmp SUB $8, R6 // compare last 8 bytes tail: MOVD (R0)(R6), R4 MOVD (R2)(R6), R5 CMP R4, R5 BEQ samebytes cmp: REV R4, R4 REV R5, R5 CMP R4, R5 ret:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 18 18:26:13 UTC 2022 - 2.1K bytes - Viewed (0) -
src/math/big/arith_s390x.s
A6: MOVD (R8)(R1*1), R6 MULHDU R9, R6 MOVD (R2)(R1*1), R10 ADDC R10, R11 // add to low order bits ADDE R0, R6 ADDC R4, R11 ADDE R0, R6 MOVD R6, R4 MOVD R11, (R2)(R1*1) MOVD (8)(R8)(R1*1), R6 MULHDU R9, R6 MOVD (8)(R2)(R1*1), R10 ADDC R10, R11 // add to low order bits ADDE R0, R6 ADDC R4, R11 ADDE R0, R6 MOVD R6, R4
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:27 UTC 2023 - 20.3K bytes - Viewed (0) -
src/math/big/arith_arm.s
SUB R3, R4 MOVW $0, R7 MOVW.W -4(R2), R6 MOVW R6<<R3, R7 MOVW R6>>R4, R6 MOVW R6, c+28(FP) B E7 L7: MOVW.W -4(R2), R6 ORR R6>>R4, R7 MOVW.W R7, -4(R5) MOVW R6<<R3, R7 E7: TEQ R1, R5 BNE L7 MOVW R7, -4(R5) RET Y7: // copy loop, because shift 0 == shift 32 MOVW.W -4(R2), R6 MOVW.W R6, -4(R5) TEQ R1, R5 BNE Y7 X7:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:27 UTC 2023 - 4K bytes - Viewed (0) -
src/crypto/md5/md5block_arm64.s
ROUND3(R4,R5,R6,R7, 8,0xfffa3942, 4); ROUND3(R7,R4,R5,R6,11,0x8771f681,11); ROUND3(R6,R7,R4,R5,14,0x6d9d6122,16); ROUND3(R5,R6,R7,R4, 1,0xfde5380c,23); ROUND3(R4,R5,R6,R7, 4,0xa4beea44, 4); ROUND3(R7,R4,R5,R6, 7,0x4bdecfa9,11); ROUND3(R6,R7,R4,R5,10,0xf6bb4b60,16); ROUND3(R5,R6,R7,R4,13,0xbebfbc70,23); ROUND3(R4,R5,R6,R7, 0,0x289b7ec6, 4); ROUND3(R7,R4,R5,R6, 3,0xeaa127fa,11); ROUND3(R6,R7,R4,R5, 6,0xd4ef3085,16);
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 4.1K bytes - Viewed (0) -
src/runtime/time_windows_arm.s
MULLU R1,R3,(R6,R5) // R7:R6:R5 = R2:R1 * R3 MOVW $0,R7 MULALU R2,R3,(R7,R6) // unscale by discarding low 32 bits, shifting the rest by 29 MOVW R6>>29,R6 // R7:R6 = (R7:R6:R5 >> 61) ORR R7<<3,R6 MOVW R7>>29,R7 // subtract (10**9 * sec) from nsec to get nanosecond remainder MOVW $1000000000, R5 // 10**9 MULLU R6,R5,(R9,R8) // R9:R8 = R7:R6 * R5 MULA R7,R5,R9,R9
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Sep 07 17:19:45 UTC 2023 - 2K bytes - Viewed (0)