- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 163 for r6 (0.02 sec)
-
src/internal/bytealg/compare_loong64.s
// make sure both a and b are aligned. OR R4, R6, R15 AND $7, R15 BNE R0, R15, byte_loop PCALIGN $16 chunk16_loop: BEQ R0, R14, byte_loop MOVV (R4), R8 MOVV (R6), R9 BNE R8, R9, byte_loop MOVV 8(R4), R16 MOVV 8(R6), R17 ADDV $16, R4 ADDV $16, R6 SUBVU $1, R14 BEQ R16, R17, chunk16_loop SUBV $8, R4 SUBV $8, R6 byte_loop: BEQ R4, R12, samebytes MOVBU (R4), R8
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 13 15:04:25 UTC 2024 - 1.7K bytes - Viewed (0) -
src/internal/bytealg/compare_ppc64x.s
// incoming: // R3 a addr -> R5 // R4 a len -> R3 // R5 b addr -> R6 // R6 b len -> R4 // // on entry to cmpbody: // R3 compare value if compared length is same. // R5 a addr // R6 b addr // R9 min(len(a),len(b)) SETB_INIT() CMP R4,R6,CR0 CMP R3,R5,CR7 ISEL CR0LT,R4,R6,R9 MOVD R5,R6 MOVD R3,R5 SETB_CR0(R3) BC $12,30,LR // beqlr cr7 BR cmpbody<>(SB)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 28 17:33:20 UTC 2023 - 6.7K bytes - Viewed (0) -
src/math/big/arith_s390x.s
A6: MOVD (R8)(R1*1), R6 MULHDU R9, R6 MOVD (R2)(R1*1), R10 ADDC R10, R11 // add to low order bits ADDE R0, R6 ADDC R4, R11 ADDE R0, R6 MOVD R6, R4 MOVD R11, (R2)(R1*1) MOVD (8)(R8)(R1*1), R6 MULHDU R9, R6 MOVD (8)(R2)(R1*1), R10 ADDC R10, R11 // add to low order bits ADDE R0, R6 ADDC R4, R11 ADDE R0, R6 MOVD R6, R4
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:27 UTC 2023 - 20.3K bytes - Viewed (0) -
src/math/big/arith_arm.s
SUB R3, R4 MOVW $0, R7 MOVW.W -4(R2), R6 MOVW R6<<R3, R7 MOVW R6>>R4, R6 MOVW R6, c+28(FP) B E7 L7: MOVW.W -4(R2), R6 ORR R6>>R4, R7 MOVW.W R7, -4(R5) MOVW R6<<R3, R7 E7: TEQ R1, R5 BNE L7 MOVW R7, -4(R5) RET Y7: // copy loop, because shift 0 == shift 32 MOVW.W -4(R2), R6 MOVW.W R6, -4(R5) TEQ R1, R5 BNE Y7 X7:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:27 UTC 2023 - 4K bytes - Viewed (0) -
src/crypto/md5/md5block_arm64.s
ROUND3(R4,R5,R6,R7, 8,0xfffa3942, 4); ROUND3(R7,R4,R5,R6,11,0x8771f681,11); ROUND3(R6,R7,R4,R5,14,0x6d9d6122,16); ROUND3(R5,R6,R7,R4, 1,0xfde5380c,23); ROUND3(R4,R5,R6,R7, 4,0xa4beea44, 4); ROUND3(R7,R4,R5,R6, 7,0x4bdecfa9,11); ROUND3(R6,R7,R4,R5,10,0xf6bb4b60,16); ROUND3(R5,R6,R7,R4,13,0xbebfbc70,23); ROUND3(R4,R5,R6,R7, 0,0x289b7ec6, 4); ROUND3(R7,R4,R5,R6, 3,0xeaa127fa,11); ROUND3(R6,R7,R4,R5, 6,0xd4ef3085,16);
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 4.1K bytes - Viewed (0) -
src/runtime/time_windows_arm.s
MULLU R1,R3,(R6,R5) // R7:R6:R5 = R2:R1 * R3 MOVW $0,R7 MULALU R2,R3,(R7,R6) // unscale by discarding low 32 bits, shifting the rest by 29 MOVW R6>>29,R6 // R7:R6 = (R7:R6:R5 >> 61) ORR R7<<3,R6 MOVW R7>>29,R7 // subtract (10**9 * sec) from nsec to get nanosecond remainder MOVW $1000000000, R5 // 10**9 MULLU R6,R5,(R9,R8) // R9:R8 = R7:R6 * R5 MULA R7,R5,R9,R9
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Sep 07 17:19:45 UTC 2023 - 2K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_ppc64x.s
MOVD ptr+0(FP), R3 MOVBZ val+8(FP), R4 LWSYNC again: LBAR (R3), R6 OR R4, R6 STBCCC R6, (R3) BNE again RET // void ·And8(byte volatile*, byte); TEXT ·And8(SB), NOSPLIT, $0-9 MOVD ptr+0(FP), R3 MOVBZ val+8(FP), R4 LWSYNC again: LBAR (R3), R6 AND R4, R6 STBCCC R6, (R3) BNE again RET // func Or(addr *uint32, v uint32)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 7.5K bytes - Viewed (0) -
src/cmd/asm/internal/asm/testdata/arm64error.s
LDADDALW R5, (R6), RSP // ERROR "illegal combination" LDADDALH R5, (R6), RSP // ERROR "illegal combination" LDADDALB R5, (R6), RSP // ERROR "illegal combination" LDADDD R5, (R6), RSP // ERROR "illegal combination" LDADDW R5, (R6), RSP // ERROR "illegal combination"
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Dec 08 03:28:17 UTC 2023 - 37.8K bytes - Viewed (0) -
src/cmd/asm/internal/asm/testdata/loong64enc1.s
MULU R4, R5, R6 // a6101c00 MULH R4, R5 // a5901c00 MULH R4, R5, R6 // a6901c00 MULHU R4, R5 // a5101d00 MULHU R4, R5, R6 // a6101d00 REM R4, R5 // a5902000 REM R4, R5, R6 // a6902000 REMU R4, R5 // a5902100 REMU R4, R5, R6 // a6902100 DIV R4, R5 // a5102000 DIV R4, R5, R6 // a6102000 DIVU R4, R5 // a5102100 DIVU R4, R5, R6 // a6102100
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 02:04:54 UTC 2024 - 8.2K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_loong64.s
MOVV ptr+0(FP), R4 MOVW delta+8(FP), R5 DBAR LL (R4), R6 ADDU R6, R5, R7 MOVV R7, R6 SC R7, (R4) BEQ R7, -4(PC) MOVW R6, ret+16(FP) DBAR RET TEXT ·Xadd64(SB), NOSPLIT, $0-24 MOVV ptr+0(FP), R4 MOVV delta+8(FP), R5 DBAR LLV (R4), R6 ADDVU R6, R5, R7 MOVV R7, R6 SCV R7, (R4) BEQ R7, -4(PC) MOVV R6, ret+16(FP) DBAR RET TEXT ·Xchg(SB), NOSPLIT, $0-20
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 6.3K bytes - Viewed (0)