Results 11 - 20 of 163 for "r6" (0.03 sec)
src/runtime/memmove_loong64.s
    ADDV    $-7, R9, R6     // R6 is end pointer-7
    PCALIGN $16
    SGTU    R6, R4, R8
    BEQ     R8, out
    MOVV    (R5), R7
    ADDV    $8, R5
    MOVV    R7, (R4)
    ADDV    $8, R4
    JMP     -6(PC)
out:
    BEQ     R4, R9, done
    MOVB    (R5), R7
    ADDV    $1, R5
    MOVB    R7, (R4)
    ADDV    $1, R4
    JMP     -5(PC)
done:
    RET
backward:
    ADDV    R6, R5          // from-end pointer
    ADDV    R4, R6, R9      // to-end pointer
Last Modified: Mon May 13 15:04:25 UTC 2024 - 1.8K bytes
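This is the tail of the LoongArch64 memmove: an 8-bytes-per-step forward loop, a byte-wise cleanup loop, and a "backward" path for moves where the destination overlaps the source from above. A minimal Go sketch of that forward/backward decision, assuming the usual overlap rule (illustrative names, not the runtime's code):

    package main

    import "fmt"

    // moveWithin copies n bytes within buf from index src to index dst.
    // Like the assembly above, it copies forward by default and switches
    // to a backward walk when dst lies inside the source range, so no
    // byte is overwritten before it has been read. Sketch only; the real
    // routine also moves 8 bytes per iteration (the MOVV loop).
    func moveWithin(buf []byte, dst, src, n int) {
        if dst <= src || dst >= src+n {
            for i := 0; i < n; i++ { // forward path
                buf[dst+i] = buf[src+i]
            }
            return
        }
        for i := n - 1; i >= 0; i-- { // "backward:" path, from-end pointers
            buf[dst+i] = buf[src+i]
        }
    }

    func main() {
        b := []byte("abcdefgh")
        moveWithin(b, 2, 0, 4) // overlapping move
        fmt.Println(string(b)) // ababcdgh
    }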
src/cmd/asm/internal/asm/testdata/arm64.s
    LDADDLD R5, (R6), ZR    // df0065f8
    LDADDLW R5, (R6), ZR    // df0065b8
    LDADDLH R5, (R6), ZR    // df006578
    LDADDLB R5, (R6), ZR    // df006538
    LDCLRD  R5, (R6), ZR    // df1025f8
    LDCLRW  R5, (R6), ZR    // df1025b8
    LDCLRH  R5, (R6), ZR    // df102578
Last Modified: Fri Dec 08 03:28:17 UTC 2023 - 94.9K bytes
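These lines are assembler test vectors: each LDADD/LDCLR form must encode to the machine word in the hex comment. With ZR as the destination the old value is discarded, so LDADDL* acts as a plain atomic add. In Go source that semantics is just sync/atomic; a sketch of the semantics only (whether the compiler emits an LDADD-class instruction depends on the target's LSE atomics support):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    func main() {
        // An atomic add whose old value is unused, matching the
        // ZR-destination forms above.
        var counter uint64
        atomic.AddUint64(&counter, 5)
        fmt.Println(counter) // 5
    }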
src/crypto/internal/bigmod/nat_s390x.s
    BR      E6
A6:
    MOVD    (R8)(R1*1), R6
    MULHDU  R9, R6
    MOVD    (R2)(R1*1), R10
    ADDC    R10, R11        // add to low order bits
    ADDE    R0, R6
    ADDC    R4, R11
    ADDE    R0, R6
    MOVD    R6, R4
    MOVD    R11, (R2)(R1*1)
    MOVD    (8)(R8)(R1*1), R6
    MULHDU  R9, R6
    MOVD    (8)(R2)(R1*1), R10
    ADDC    R10, R11        // add to low order bits
    ADDE    R0, R6
    ADDC    R4, R11
    ADDE    R0, R6
    MOVD    R6, R4
Last Modified: Wed May 24 22:37:58 UTC 2023 - 1.6K bytes
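The MULHDU/ADDC/ADDE chain accumulates z[i] += x[i]*y + carry with a 128-bit intermediate, unrolled two words per iteration. One step of that recurrence in plain Go using math/bits (hypothetical helper name, not bigmod's API):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // addMulCarry computes z + x*y + carry, returning the low word and
    // the new carry. The high half of the product (MULHDU above) plus
    // the carries out of the two adds (ADDC/ADDE) form the carry word.
    func addMulCarry(z, x, y, carry uint64) (uint64, uint64) {
        hi, lo := bits.Mul64(x, y)
        lo, c := bits.Add64(lo, z, 0)
        hi += c
        lo, c = bits.Add64(lo, carry, 0)
        hi += c
        return lo, hi
    }

    func main() {
        lo, carry := addMulCarry(1, ^uint64(0), ^uint64(0), 0)
        fmt.Printf("lo=%#x carry=%#x\n", lo, carry)
    }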
src/crypto/subtle/xor_ppc64x.s
    ADD     $32, R8
    ADD     $-32, R6
    CMP     R6, $8
    BLE     small
    // Case for 8 <= n < 32 bytes
    // Process 16 bytes if available
xor16:
    CMP     R6, $16
    BLT     xor8
    LXVD2X  (R4)(R8), VS32
    LXVD2X  (R5)(R8), VS33
    XXLXOR  VS32, VS33, VS32
    STXVD2X VS32, (R3)(R8)
    ADD     $16, R8
    ADD     $-16, R6
small:
    CMP     R6, $0
    BC      12, 2, LR       // BEQLR
xor8:
#ifdef GOPPC64_power10
    SLD     $56, R6, R17
    ADD     R4, R8, R18
    ADD     R5, R8, R19
Last Modified: Wed May 22 18:17:17 UTC 2024 - 2.9K bytes
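The vector loop XORs 16 bytes per LXVD2X/XXLXOR/STXVD2X step, then handles the remainder in smaller chunks. A word-at-a-time Go sketch of the same shape (illustrative only, not crypto/subtle.XORBytes itself):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // xorBytes XORs min(len(x), len(y)) bytes of x and y into dst,
    // 8 bytes at a time with a byte-wise tail, mirroring the
    // chunked-then-small structure of the assembly above.
    func xorBytes(dst, x, y []byte) int {
        n := len(x)
        if len(y) < n {
            n = len(y)
        }
        i := 0
        for ; n-i >= 8; i += 8 {
            v := binary.LittleEndian.Uint64(x[i:]) ^ binary.LittleEndian.Uint64(y[i:])
            binary.LittleEndian.PutUint64(dst[i:], v)
        }
        for ; i < n; i++ { // tail, like the "small" path
            dst[i] = x[i] ^ y[i]
        }
        return n
    }

    func main() {
        a := []byte("hello, world")
        b := []byte("HELLO, WORLD")
        out := make([]byte, len(a))
        xorBytes(out, a, b)
        fmt.Printf("% x\n", out)
    }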
src/internal/runtime/atomic/atomic_s390x.s
    MOVD    ptr+0(FP), R4
    MOVW    new+8(FP), R3
    MOVW    (R4), R6
repeat:
    CS      R6, R3, (R4)    // if R6==(R4) then (R4)=R3 else R6=(R4)
    BNE     repeat
    MOVW    R6, ret+16(FP)
    RET

// func Xchg64(ptr *uint64, new uint64) uint64
TEXT ·Xchg64(SB), NOSPLIT, $0-24
    MOVD    ptr+0(FP), R4
    MOVD    new+8(FP), R3
    MOVD    (R4), R6
repeat:
    CSG     R6, R3, (R4)    // if R6==(R4) then (R4)=R3 else R6=(R4)
    BNE     repeat
    MOVD    R6, ret+16(FP)
    RET
Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 7.1K bytes
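Xchg and Xchg64 are built from s390x CS/CSG (compare-and-swap): load the old value, try to install the new one, and retry if another writer got there first (the BNE repeat). The same loop in portable Go, shown only to mirror the assembly (sync/atomic.SwapUint64 is the real API for this):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // xchg64 atomically replaces *p with new and returns the old value,
    // using a CAS retry loop like the CSG/BNE pair above.
    func xchg64(p *uint64, new uint64) uint64 {
        for {
            old := atomic.LoadUint64(p)
            if atomic.CompareAndSwapUint64(p, old, new) {
                return old
            }
        }
    }

    func main() {
        var v uint64 = 7
        old := xchg64(&v, 42)
        fmt.Println(old, v) // 7 42
    }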
src/math/big/arith_arm64.s
    MOVD.W  -8(R2), R6
    LSR     R4, R6, R5      // return value
    LSL     R3, R6, R8      // x[i] << s
    SUB     $1, R1
one:
    TBZ     $0, R1, two
    MOVD.W  -8(R2), R6
    LSR     R4, R6, R7
    ORR     R8, R7
    LSL     R3, R6, R8
    SUB     $1, R1
    MOVD.W  R7, -8(R0)
two:
    TBZ     $1, R1, loop
    LDP.W   -16(R2), (R6, R7)
    LSR     R4, R7, R10
    ORR     R8, R10
    LSL     R3, R7
    LSR     R4, R6, R9
    ORR     R7, R9
    LSL     R3, R6, R8
    SUB     $2, R1
Last Modified: Thu Oct 19 23:33:27 UTC 2023 - 11.8K bytes
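This is a multiword left shift walked from the top word down: each output word ORs together x[i]<<s and the high bits falling out of x[i-1] (the LSL/LSR/ORR trio), with TBZ peeling one- and two-word chunks. A plain-Go sketch of the word recurrence, assuming 64-bit words and 0 < s < 64 (hypothetical name, not math/big's actual routine):

    package main

    import "fmt"

    const _W = 64 // bits per word

    // shlVU shifts the little-endian word vector x left by s bits into z,
    // returning the bits shifted out of the top word (the "return value"
    // comment above).
    func shlVU(z, x []uint64, s uint) (c uint64) {
        if len(x) == 0 {
            return 0
        }
        c = x[len(x)-1] >> (_W - s)
        for i := len(x) - 1; i > 0; i-- {
            z[i] = x[i]<<s | x[i-1]>>(_W-s)
        }
        z[0] = x[0] << s
        return c
    }

    func main() {
        x := []uint64{0x8000000000000001, 0x1}
        z := make([]uint64, len(x))
        c := shlVU(z, x, 4)
        fmt.Printf("c=%#x z=%#x\n", c, z) // c=0x0 z=[0x10 0x18]
    }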
src/runtime/sys_aix_ppc64.s
    CMP     $0, R6
    BEQ     exit            // g.m == nil
    // restore libcall
    MOVD    96(R1), R7
    MOVD    R7, (m_libcall+libcall_fn)(R6)
    MOVD    104(R1), R7
    MOVD    R7, (m_libcall+libcall_args)(R6)
    MOVD    112(R1), R7
    MOVD    R7, (m_libcall+libcall_n)(R6)
    MOVD    120(R1), R7
    MOVD    R7, (m_libcall+libcall_r1)(R6)
    MOVD    128(R1), R7
    MOVD    R7, (m_libcall+libcall_r2)(R6)
    // restore errno
    MOVD    (m_mOS+mOS_perrno)(R6), R7
Last Modified: Fri Apr 21 19:29:00 UTC 2023 - 7.4K bytes
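The stack slots 96..128(R1) hold a saved libcall record, the state of an in-flight C call (function pointer, argument block, argument count, results) that this path copies back into g.m after a signal. A sketch of the record's shape; field order and the err field are recalled from runtime/runtime2.go and should be treated as an assumption:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // libcall mirrors the fields the assembly reloads: fn, args, n, r1, r2.
    type libcall struct {
        fn   uintptr // C function to call
        n    uintptr // number of parameters
        args uintptr // pointer to the parameter block
        r1   uintptr // first return value
        r2   uintptr // second return value
        err  uintptr // errno after the call
    }

    func main() {
        fmt.Println(unsafe.Sizeof(libcall{})) // 48 on 64-bit targets
    }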
src/internal/bytealg/equal_arm64.s
    MOVD    (R1), R5
    EOR     R4, R5
    CBNZ    R5, not_equal
    SUB     $8, R2, R6      // offset of the last 8 bytes
    MOVD    (R0)(R6), R4
    MOVD    (R1)(R6), R5
    EOR     R4, R5
    CBNZ    R5, not_equal
    B       equal
lt_8:
    TBZ     $2, R2, lt_4
    MOVWU   (R0), R4
    MOVWU   (R1), R5
    EOR     R4, R5
    CBNZ    R5, not_equal
    SUB     $4, R2, R6      // offset of the last 4 bytes
    MOVWU   (R0)(R6), R4
    MOVWU   (R1)(R6), R5
    EOR     R4, R5
    CBNZ    R5, not_equal
    B       equal
lt_4:
Last Modified: Wed Jan 24 16:07:25 UTC 2024 - 2.5K bytes
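The comparison avoids a byte-wise tail: after XOR-checking the first 8 bytes, it checks the last 8 bytes at offset len-8, letting the two loads overlap. The same trick in Go, a sketch assuming equal-length slices with 8 <= len <= 16 (not the bytealg source):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // equal8to16 reports whether a and b are equal by XOR-ing the first
    // and last 8-byte windows; for lengths in [8,16] the two windows
    // cover every byte, possibly overlapping in the middle.
    func equal8to16(a, b []byte) bool {
        n := len(a)
        x := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
        y := binary.LittleEndian.Uint64(a[n-8:]) ^ binary.LittleEndian.Uint64(b[n-8:])
        return x|y == 0
    }

    func main() {
        fmt.Println(equal8to16([]byte("hello, world!"), []byte("hello, world!"))) // true
        fmt.Println(equal8to16([]byte("hello, world!"), []byte("hello, World!"))) // false
    }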
src/internal/bytealg/equal_ppc64x.s
    // alignment requirements.
    ANDCC   $PAGE_OFFSET, R8, R6    // &sX & PAGE_OFFSET
    ANDCC   $PAGE_OFFSET, R4, R9
    SUBC    R5, $8, R12             // 8-len
    SLD     $3, R12, R14            // (8-len)*8
    CMPU    R6, R12, CR1            // Enough bytes lower in the page to load lower?
    CMPU    R9, R12, CR0
    SUB     R12, R8, R6             // compute lower load address
    SUB     R12, R4, R9
    ISEL    CR1LT, R8, R6, R8       // R8 = R6 < 0 ? R8 (&s1) : R6 (&s1 - (8-len))
    ISEL    CR0LT, R4, R9, R4       // Similar for s2
Last Modified: Fri Apr 21 16:47:45 UTC 2023 - 4.9K bytes
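Here ISEL (integer select) picks, branch-free, between the original pointer and one backed up by 8-len bytes, so a full 8-byte load of a short tail never crosses into the next page: back up only when the page offset leaves room below. A Go model of that address selection, with hypothetical names and an assumed 4KB page size:

    package main

    import "fmt"

    // isel mimics the ISEL instruction: select a or b on a condition
    // without branching (here written with a branch for clarity).
    func isel(cond bool, a, b uintptr) uintptr {
        if cond {
            return a
        }
        return b
    }

    func main() {
        const pageOffsetMask = 0xfff        // assumes 4KB pages
        ptr := uintptr(0x10005)             // start of a short tail
        n := uintptr(3)                     // tail length < 8
        back := uintptr(8) - n              // SUBC R5, $8, R12
        lower := ptr - back                 // candidate lower load address
        crosses := ptr&pageOffsetMask < back // CMPU: not enough room below
        load := isel(crosses, ptr, lower)   // ISEL CR1LT, ...
        fmt.Printf("load 8 bytes from %#x\n", load) // 0x10000
    }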
src/cmd/asm/internal/asm/testdata/ppc64.s
    RLWNM   $3, R4, $29, $31, R6    // 54861f7e
    RLWNM   $0, R4, $29, $31, R6    // 5486077e
    RLWNM   R0, R4, $29, $31, R6    // 5c86077e
    RLWNM   R3, R4, $7, R6          // 5c861f7e
    RLWNM   R3, R4, $29, $31, R6    // 5c861f7e
    RLWNMCC $3, R4, $7, R6          // 54861f7f
    RLWNMCC $3, R4, $29, $31, R6    // 54861f7f
    RLWNMCC R3, R4, $7, R6          // 5c861f7f
    RLWNMCC R3, R4, $29, $31, R6    // 5c861f7f
    RLDMI   $0, R4, $7, R6          // 7886076c
Last Modified: Fri May 17 21:53:50 UTC 2024 - 50.2K bytes
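These are assembler test vectors as well: each RLWNM/RLWNMCC operand form must encode to the hex word in the comment. Semantically, rlwnm rotates the low 32 bits left and keeps only the mask bits mb..me (IBM big-endian bit numbering). A sketch of the semantics only, not the encoder:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // rlwnm models PowerPC "rotate left word then AND with mask":
    // rotate rs left by sh bits, then mask to IBM bit positions mb..me.
    func rlwnm(rs uint32, sh int, mb, me uint) uint32 {
        rot := bits.RotateLeft32(rs, sh)
        var mask uint32
        for i := mb; i <= me; i++ {
            mask |= 1 << (31 - i) // IBM numbering: bit 0 is the MSB
        }
        return rot & mask
    }

    func main() {
        // mask $29..$31 keeps the low 3 bits, as in the first test line
        fmt.Printf("%#x\n", rlwnm(0xf, 1, 29, 31)) // 0x6
    }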