- Sort: Score
- Results per page: 10
- Languages: All
Results 61 - 70 of 321 for R4 (0.03 sec)
-
src/math/big/arith_ppc64x.s
final: // Capture CA SUBE R4, R4 NEG R4, R4 done: MOVD R4, c+56(FP) RET //func shlVU(z, x []Word, s uint) (c Word) TEXT ·shlVU(SB), NOSPLIT, $0 MOVD z+0(FP), R3 MOVD x+24(FP), R6 MOVD s+48(FP), R9 MOVD z_len+8(FP), R4 MOVD x_len+32(FP), R7 CMP R9, $0 // s==0 copy(z,x) BEQ zeroshift CMP R4, $0 // len(z)==0 return BEQ done
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 18:17:17 UTC 2024 - 16.8K bytes - Viewed (0) -
src/internal/cpu/cpu_s390x.s
MOVD $ret+0(FP), R1 // address of 16-byte return value KMCTR R2, R4, R4 // cipher message with counter (KMCTR) RET // func kmaQuery() queryResult TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 MOVD $0, R0 // set function code to 0 (KMA-Query) MOVD $ret+0(FP), R1 // address of 16-byte return value KMA R2, R6, R4 // cipher message with authentication (KMA) RET // func kimdQuery() queryResult
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Nov 22 03:55:32 UTC 2023 - 2.2K bytes - Viewed (0) -
src/runtime/memclr_arm64.s
RET zero_by_16: // n greater than 16 bytes, check if the start address is aligned NEG R0, R4 ANDS $15, R4, R4 // Try zeroing using zva if the start address is aligned with 16 BEQ try_zva // Non-aligned store STP (ZR, ZR), (R0) // Make the destination aligned SUB R4, R1, R1 ADD R4, R0, R0 B try_zva tail_maybe_long: CMP $64, R1 BHS no_zva tail63: ANDS $48, R1, R3
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 18 18:26:13 UTC 2022 - 3.6K bytes - Viewed (0) -
src/syscall/asm_linux_loong64.s
TEXT ·rawVforkSyscall(SB),NOSPLIT,$0-48 MOVV a1+8(FP), R4 MOVV a2+16(FP), R5 MOVV a3+24(FP), R6 MOVV $0, R7 MOVV $0, R8 MOVV $0, R9 MOVV trap+0(FP), R11 // syscall entry SYSCALL MOVW $-4096, R12 BGEU R12, R4, ok MOVV $-1, R12 MOVV R12, r1+32(FP) // r1 SUBVU R4, R0, R4 MOVV R4, err+40(FP) // errno RET ok: MOVV R4, r1+32(FP) // r1 MOVV R0, err+40(FP) // errno RET
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Sep 07 19:11:15 UTC 2023 - 947 bytes - Viewed (0) -
src/runtime/asm_loong64.s
JAL runtime·save_g(SB) MOVV gobuf_sp(R4), R3 MOVV gobuf_lr(R4), R1 MOVV gobuf_ret(R4), R19 MOVV gobuf_ctxt(R4), REGCTXT MOVV R0, gobuf_sp(R4) MOVV R0, gobuf_ret(R4) MOVV R0, gobuf_lr(R4) MOVV R0, gobuf_ctxt(R4) MOVV gobuf_pc(R4), R6 JMP (R6) // void mcall(fn func(*g)) // Switch to m->g0's stack, call fn(g). // Fn must never return. It should gogo(&g->sched) // to keep running g.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 13 15:04:25 UTC 2024 - 26.5K bytes - Viewed (0) -
src/cmd/internal/obj/ppc64/doc.go
Examples: MOVD (R3), R4 <=> ld r4,0(r3) MOVW (R3), R4 <=> lwa r4,0(r3) MOVWZU 4(R3), R4 <=> lwzu r4,4(r3) MOVWZ (R3+R5), R4 <=> lwzx r4,r3,r5 MOVHZ (R3), R4 <=> lhz r4,0(r3) MOVHU 2(R3), R4 <=> lhau r4,2(r3) MOVBZ (R3), R4 <=> lbz r4,0(r3) MOVD R4,(R3) <=> std r4,0(r3) MOVW R4,(R3) <=> stw r4,0(r3) MOVW R4,(R3+R5) <=> stwx r4,r3,r5 MOVWU R4,4(R3) <=> stwu r4,4(r3)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 21 16:47:45 UTC 2023 - 11.3K bytes - Viewed (0) -
src/runtime/rt0_netbsd_arm64.s
// Synchronous initialization. MOVD $runtime·libpreinit(SB), R4 BL (R4) // Create a new thread to do the runtime initialization and return. MOVD _cgo_sys_thread_create(SB), R4 CBZ R4, nocgo MOVD $_rt0_arm64_netbsd_lib_go(SB), R0 MOVD $0, R1 SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved. BL (R4) ADD $16, RSP B restore nocgo:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Mar 30 01:28:43 UTC 2022 - 1.8K bytes - Viewed (0) -
src/crypto/internal/edwards25519/field/fe_generic.go
r3 = addMul64(r3, a1, b2) r3 = addMul64(r3, a2, b1) r3 = addMul64(r3, a3, b0) r3 = addMul64(r3, a4_19, b4) // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 r4 := mul64(a0, b4) r4 = addMul64(r4, a1, b3) r4 = addMul64(r4, a2, b2) r4 = addMul64(r4, a3, b1) r4 = addMul64(r4, a4, b0) // After the multiplication, we need to reduce (carry) the five coefficients
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Sep 27 01:16:19 UTC 2023 - 8.5K bytes - Viewed (0) -
src/crypto/subtle/xor_ppc64x.s
// Case for >= 64 bytes // Process 64 bytes per iteration // Load 4 vectors of a and b // XOR the corresponding vectors // from a and b and store the result loop64: LXVD2X (R4)(R8), VS32 LXVD2X (R4)(R10), VS34 LXVD2X (R4)(R14), VS36 LXVD2X (R4)(R15), VS38 LXVD2X (R5)(R8), VS33 LXVD2X (R5)(R10), VS35 LXVD2X (R5)(R14), VS37 LXVD2X (R5)(R15), VS39 XXLXOR VS32, VS33, VS32 XXLXOR VS34, VS35, VS34
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 18:17:17 UTC 2024 - 2.9K bytes - Viewed (0) -
src/runtime/rt0_openbsd_arm64.s
// Synchronous initialization. MOVD $runtime·libpreinit(SB), R4 BL (R4) // Create a new thread to do the runtime initialization and return. MOVD _cgo_sys_thread_create(SB), R4 CBZ R4, nocgo MOVD $_rt0_arm64_openbsd_lib_go(SB), R0 MOVD $0, R1 SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved. BL (R4) ADD $16, RSP B restore nocgo:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Mar 30 01:28:43 UTC 2022 - 2K bytes - Viewed (0)