- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 54 for x8 (0.04 sec)
-
src/internal/bytealg/compare_riscv64.s
AND $7, X10, X7 AND $7, X12, X8 BNE X7, X8, check8_unaligned BEQZ X7, compare32 // Check one byte at a time until we reach 8 byte alignment. SUB X7, X0, X7 ADD $8, X7, X7 SUB X7, X5, X5 align: SUB $1, X7 MOVBU 0(X10), X8 MOVBU 0(X12), X9 BNE X8, X9, cmp ADD $1, X10 ADD $1, X12 BNEZ X7, align check32: // X6 contains $32
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Nov 09 13:57:06 UTC 2023 - 3.9K bytes - Viewed (0) -
src/crypto/sha512/sha512block_riscv64.s
MOVBU ((index*8)+1)(X29), X6; \ MOVBU ((index*8)+2)(X29), X7; \ MOVBU ((index*8)+3)(X29), X8; \ SLL $56, X5; \ SLL $48, X6; \ OR X5, X6, X5; \ SLL $40, X7; \ OR X5, X7, X5; \ SLL $32, X8; \ OR X5, X8, X5; \ MOVBU ((index*8)+4)(X29), X9; \ MOVBU ((index*8)+5)(X29), X6; \ MOVBU ((index*8)+6)(X29), X7; \ MOVBU ((index*8)+7)(X29), X8; \ SLL $24, X9; \ OR X5, X9, X5; \ SLL $16, X6; \ OR X5, X6, X5; \
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Mar 07 14:57:07 UTC 2024 - 9.1K bytes - Viewed (0) -
test/inline_math_bits_rotate.go
// Test that inlining of math/bits.RotateLeft* treats those calls as intrinsics. package p import "math/bits" var ( x8 uint8 x16 uint16 x32 uint32 x64 uint64 x uint ) func f() { // ERROR "can inline f" x8 = bits.RotateLeft8(x8, 1) x16 = bits.RotateLeft16(x16, 1) x32 = bits.RotateLeft32(x32, 1) x64 = bits.RotateLeft64(x64, 1) x = bits.RotateLeft(x, 1)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:25 UTC 2023 - 571 bytes - Viewed (0) -
src/cmd/asm/internal/asm/testdata/amd64enc_extra.s
VMOVHPD 7(DX), X8, X8 // c539164207 or 6271bd28168207000000 or 6271bd48168207000000 VMOVHPD -15(R11)(CX*1), X8, X8 // c4413916440bf1 or 6251bd2816840bf1ffffff or 6251bd4816840bf1ffffff VMOVHPD (SP)(AX*2), X8, X8 // c539160444 or 6271bd28160444 or 6271bd48160444 VMOVHPD (AX), X20, X8 // 6271dd001600 or 6271dd201600 or 6271dd401600
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 11 18:32:50 UTC 2023 - 57.6K bytes - Viewed (0) -
src/crypto/internal/bigmod/nat_riscv64.s
MOV 1*8(X5), X13 // z[1] MOV 2*8(X5), X16 // z[2] MOV 3*8(X5), X19 // z[3] MOV 0*8(X7), X8 // x[0] MOV 1*8(X7), X11 // x[1] MOV 2*8(X7), X14 // x[2] MOV 3*8(X7), X17 // x[3] MULHU X8, X6, X9 // z_hi[0] = x[0] * y MUL X8, X6, X8 // z_lo[0] = x[0] * y ADD X8, X10, X21 // z_lo[0] = x[0] * y + z[0] SLTU X8, X21, X22 ADD X9, X22, X9 // z_hi[0] = x[0] * y + z[0] ADD X21, X29, X10 // z_lo[0] = x[0] * y + z[0] + c
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Nov 09 13:57:06 UTC 2023 - 2.2K bytes - Viewed (0) -
src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s
VREPF $3, KEY0, X7 VREPF $0, KEY1, X8 VREPF $1, KEY1, X9 VREPF $2, KEY1, X10 VREPF $3, KEY1, X11 VLR CTR, X12 VREPF $1, NONCE, X13 VREPF $2, NONCE, X14 VREPF $3, NONCE, X15 MOVD $(NUM_ROUNDS/2), R1 loop: ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11) ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9) ADD $-1, R1
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:33 UTC 2023 - 5.3K bytes - Viewed (0) -
src/runtime/cgo/gcc_riscv64.S
* * Calling into the gc tool chain, where all registers are caller save. * Called from standard RISCV ELF psABI, where x8-x9, x18-x27, f8-f9 and * f18-f27 are callee-save, so they must be saved explicitly, along with * x1 (LR). */ .globl crosscall1 crosscall1: sd x1, -200(sp) addi sp, sp, -200 sd x8, 8(sp) sd x9, 16(sp) sd x18, 24(sp) sd x19, 32(sp) sd x20, 40(sp) sd x21, 48(sp) sd x22, 56(sp)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Dec 05 16:41:48 UTC 2022 - 1.6K bytes - Viewed (0) -
src/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
x1, x6, x11, x12 := quarterRound(s.p1, s.p6, s.p11, fcr12) x2, x7, x8, x13 := quarterRound(s.p2, s.p7, fcr8, s.p13) x3, x4, x9, x14 := quarterRound(s.p3, fcr4, s.p9, s.p14) // The remaining 18 rounds. for i := 0; i < 9; i++ { // Column round. x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Oct 26 00:11:50 UTC 2022 - 13.9K bytes - Viewed (0) -
src/crypto/internal/edwards25519/scalar_fiat.go
var x5 uint64 var x6 uint64 x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4))) var x7 uint64 var x8 uint64 x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6))) var x9 uint64 fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff) var x10 uint64 var x11 uint64 x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0)) var x12 uint64
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Nov 10 18:45:00 UTC 2022 - 35.6K bytes - Viewed (0) -
src/cmd/cgo/internal/test/issue26213/test26213.go
_ = x2 var x3 C.jthrowable = 0 _ = x3 var x4 C.jstring = 0 _ = x4 var x5 C.jarray = 0 _ = x5 var x6 C.jbooleanArray = 0 _ = x6 var x7 C.jbyteArray = 0 _ = x7 var x8 C.jcharArray = 0 _ = x8 var x9 C.jshortArray = 0 _ = x9 var x10 C.jintArray = 0 _ = x10 var x11 C.jlongArray = 0 _ = x11 var x12 C.jfloatArray = 0 _ = x12 var x13 C.jdoubleArray = 0 _ = x13
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 12 12:00:02 UTC 2023 - 835 bytes - Viewed (0)