Results 1 - 10 of 87 for x8 (0.02 sec)
src/internal/bytealg/compare_riscv64.s
	AND	$7, X10, X7
	AND	$7, X12, X8
	BNE	X7, X8, check8_unaligned
	BEQZ	X7, compare32

	// Check one byte at a time until we reach 8 byte alignment.
	SUB	X7, X0, X7
	ADD	$8, X7, X7
	SUB	X7, X5, X5
align:
	SUB	$1, X7
	MOVBU	0(X10), X8
	MOVBU	0(X12), X9
	BNE	X8, X9, cmp
	ADD	$1, X10
	ADD	$1, X12
	BNEZ	X7, align
check32:
	// X6 contains $32
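For readers who don't speak RISC-V assembly, here is a rough Go sketch of the same strategy, assuming nothing beyond the standard library (illustrative only, not the actual bytealg implementation: the real code aligns pointers rather than slice indices, and the compare32 loop handles 32 bytes per iteration).

package main

import (
	"encoding/binary"
	"fmt"
)

// compare8 sketches the strategy above: step one byte at a time until an
// 8-byte boundary (the "align" loop), compare 64-bit words while they are
// equal, then finish byte-wise to locate the differing byte.
func compare8(a, b []byte) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	i := 0
	for i < n && i&7 != 0 { // the "align" loop
		if a[i] != b[i] {
			if a[i] < b[i] {
				return -1
			}
			return 1
		}
		i++
	}
	for i+8 <= n { // word-at-a-time loop
		if binary.LittleEndian.Uint64(a[i:]) != binary.LittleEndian.Uint64(b[i:]) {
			break // the differing byte is inside this word; find it below
		}
		i += 8
	}
	for i < n {
		if a[i] != b[i] {
			if a[i] < b[i] {
				return -1
			}
			return 1
		}
		i++
	}
	switch {
	case len(a) < len(b):
		return -1
	case len(a) > len(b):
		return 1
	}
	return 0
}

func main() {
	fmt.Println(compare8([]byte("gopher"), []byte("gophers"))) // -1
}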
src/cmd/asm/internal/asm/testdata/avx512enc/avx512er.s
VRCP28SS X26, X2, K3, X8 // 62126d0bcbc2 or 62126d2bcbc2 or 62126d4bcbc2
VRCP28SS X19, X2, K3, X8 // 62326d0bcbc3 or 62326d2bcbc3 or 62326d4bcbc3
VRCP28SS X0, X2, K3, X8  // 62726d0bcbc0 or 62726d2bcbc0 or 62726d4bcbc0
src/crypto/sha512/sha512block_riscv64.s
	MOVBU	((index*8)+1)(X29), X6; \
	MOVBU	((index*8)+2)(X29), X7; \
	MOVBU	((index*8)+3)(X29), X8; \
	SLL	$56, X5; \
	SLL	$48, X6; \
	OR	X5, X6, X5; \
	SLL	$40, X7; \
	OR	X5, X7, X5; \
	SLL	$32, X8; \
	OR	X5, X8, X5; \
	MOVBU	((index*8)+4)(X29), X9; \
	MOVBU	((index*8)+5)(X29), X6; \
	MOVBU	((index*8)+6)(X29), X7; \
	MOVBU	((index*8)+7)(X29), X8; \
	SLL	$24, X9; \
	OR	X5, X9, X5; \
	SLL	$16, X6; \
	OR	X5, X6, X5; \
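The macro is assembling a big-endian 64-bit message word from eight byte loads, shifting each byte into position and ORing it into X5. A minimal Go equivalent of that shift-and-OR sequence (beLoad is a name chosen here, not one from the file):

package main

import (
	"encoding/binary"
	"fmt"
)

// beLoad mirrors the macro above: each byte is shifted into place and
// ORed into the accumulator, producing a big-endian 64-bit word.
func beLoad(p []byte) uint64 {
	return uint64(p[0])<<56 | uint64(p[1])<<48 | uint64(p[2])<<40 |
		uint64(p[3])<<32 | uint64(p[4])<<24 | uint64(p[5])<<16 |
		uint64(p[6])<<8 | uint64(p[7])
}

func main() {
	p := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	fmt.Println(beLoad(p) == binary.BigEndian.Uint64(p)) // true
}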
src/image/jpeg/idct.go
x1 := s[4] << 11
x2 := s[6]
x3 := s[2]
x4 := s[1]
x5 := s[7]
x6 := s[5]
x7 := s[3]

// Stage 1.
x8 := w7 * (x4 + x5)
x4 = x8 + w1mw7*x4
x5 = x8 - w1pw7*x5
x8 = w3 * (x6 + x7)
x6 = x8 - w3mw5*x6
x7 = x8 - w3pw5*x7

// Stage 2.
x8 = x0 + x1
x0 -= x1
x1 = w6 * (x3 + x2)
x2 = x1 - w2pw6*x2
x3 = x1 + w2mw6*x3
x1 = x4 + x6
x4 -= x6
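This is a fixed-point integer IDCT: the << 11 scales the input by 2048 so the cosine coefficients w1..w7 can be integers. If the constants in idct.go are as recalled here (e.g. w1 = 2841, w7 = 565), they follow wN ≈ 2048·√2·cos(N·π/16); a quick sanity check under that assumption:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Assumed scaling convention: wN = round(2048 * sqrt(2) * cos(N*pi/16)).
	w := func(n float64) float64 {
		return math.Round(2048 * math.Sqrt2 * math.Cos(n*math.Pi/16))
	}
	fmt.Println(w(1), w(7)) // 2841 565
}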
src/cmd/asm/internal/asm/testdata/avx512enc/avx512_bitalg.s
VPSHUFBITQMB X24, X8, K6, K0           // 62923d0e8fc0
VPSHUFBITQMB X7, X8, K6, K0            // 62f23d0e8fc7
VPSHUFBITQMB X0, X8, K6, K0            // 62f23d0e8fc0
VPSHUFBITQMB (R8), X8, K6, K0          // 62d23d0e8f00
VPSHUFBITQMB 15(DX)(BX*2), X8, K6, K0  // 62f23d0e8f845a0f000000
test/closure.go
src/cmd/compile/internal/test/align_test.go
src/cmd/asm/internal/asm/testdata/avx512enc/avx512cd.s
VPCONFLICTQ X11, K4, X8                 // 6252fd0cc4c3
VPCONFLICTQ X16, K4, X8                 // 6232fd0cc4c0
VPCONFLICTQ X6, K4, X8                  // 6272fd0cc4c6
VPCONFLICTQ 15(R8)(R14*8), K4, X8       // 6212fd0cc484f00f000000
VPCONFLICTQ -15(R14)(R15*2), K4, X8     // 6212fd0cc4847ef1ffffff
test/inline_math_bits_rotate.go
// Test that inlining of math/bits.RotateLeft* treats those calls as intrinsics.

package p

import "math/bits"

var (
	x8  uint8
	x16 uint16
	x32 uint32
	x64 uint64
	x   uint
)

func f() { // ERROR "can inline f"
	x8 = bits.RotateLeft8(x8, 1)
	x16 = bits.RotateLeft16(x16, 1)
	x32 = bits.RotateLeft32(x32, 1)
	x64 = bits.RotateLeft64(x64, 1)
	x = bits.RotateLeft(x, 1)
}
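For context, bits.RotateLeft* is a true rotate rather than a shift: bits that leave the top re-enter at the bottom, which is what lets the compiler lower the call to a single rotate instruction on architectures that have one. A quick demonstration:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// The high bit wraps around to bit 0 instead of being discarded.
	fmt.Printf("%08b\n", bits.RotateLeft8(0b1000_0001, 1)) // 00000011
}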
src/cmd/asm/internal/asm/testdata/amd64enc_extra.s
VMOVHPD 7(DX), X8, X8           // c539164207 or 6271bd28168207000000 or 6271bd48168207000000
VMOVHPD -15(R11)(CX*1), X8, X8  // c4413916440bf1 or 6251bd2816840bf1ffffff or 6251bd4816840bf1ffffff
VMOVHPD (SP)(AX*2), X8, X8      // c539160444 or 6271bd28160444 or 6271bd48160444
VMOVHPD (AX), X20, X8           // 6271dd001600 or 6271dd201600 or 6271dd401600