- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 33 for x8 (0.19 sec)
-
src/internal/bytealg/compare_riscv64.s
AND $7, X10, X7 AND $7, X12, X8 BNE X7, X8, check8_unaligned BEQZ X7, compare32 // Check one byte at a time until we reach 8 byte alignment. SUB X7, X0, X7 ADD $8, X7, X7 SUB X7, X5, X5 align: SUB $1, X7 MOVBU 0(X10), X8 MOVBU 0(X12), X9 BNE X8, X9, cmp ADD $1, X10 ADD $1, X12 BNEZ X7, align check32: // X6 contains $32
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Nov 09 13:57:06 UTC 2023 - 3.9K bytes - Viewed (0) -
src/crypto/sha512/sha512block_riscv64.s
MOVBU ((index*8)+1)(X29), X6; \ MOVBU ((index*8)+2)(X29), X7; \ MOVBU ((index*8)+3)(X29), X8; \ SLL $56, X5; \ SLL $48, X6; \ OR X5, X6, X5; \ SLL $40, X7; \ OR X5, X7, X5; \ SLL $32, X8; \ OR X5, X8, X5; \ MOVBU ((index*8)+4)(X29), X9; \ MOVBU ((index*8)+5)(X29), X6; \ MOVBU ((index*8)+6)(X29), X7; \ MOVBU ((index*8)+7)(X29), X8; \ SLL $24, X9; \ OR X5, X9, X5; \ SLL $16, X6; \ OR X5, X6, X5; \
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Mar 07 14:57:07 UTC 2024 - 9.1K bytes - Viewed (0) -
src/image/jpeg/idct.go
x1 := s[4] << 11 x2 := s[6] x3 := s[2] x4 := s[1] x5 := s[7] x6 := s[5] x7 := s[3] // Stage 1. x8 := w7 * (x4 + x5) x4 = x8 + w1mw7*x4 x5 = x8 - w1pw7*x5 x8 = w3 * (x6 + x7) x6 = x8 - w3mw5*x6 x7 = x8 - w3pw5*x7 // Stage 2. x8 = x0 + x1 x0 -= x1 x1 = w6 * (x3 + x2) x2 = x1 - w2pw6*x2 x3 = x1 + w2mw6*x3 x1 = x4 + x6 x4 -= x6
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 02 23:18:37 UTC 2019 - 5K bytes - Viewed (0) -
test/closure.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sun Jul 01 17:59:50 UTC 2012 - 1.7K bytes - Viewed (0) -
src/cmd/compile/internal/test/align_test.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 24 19:06:05 UTC 2021 - 1.6K bytes - Viewed (0) -
test/inline_math_bits_rotate.go
// Test that inlining of math/bits.RotateLeft* treats those calls as intrinsics. package p import "math/bits" var ( x8 uint8 x16 uint16 x32 uint32 x64 uint64 x uint ) func f() { // ERROR "can inline f" x8 = bits.RotateLeft8(x8, 1) x16 = bits.RotateLeft16(x16, 1) x32 = bits.RotateLeft32(x32, 1) x64 = bits.RotateLeft64(x64, 1) x = bits.RotateLeft(x, 1)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:25 UTC 2023 - 571 bytes - Viewed (0) -
src/crypto/internal/bigmod/nat_riscv64.s
MOV 1*8(X5), X13 // z[1] MOV 2*8(X5), X16 // z[2] MOV 3*8(X5), X19 // z[3] MOV 0*8(X7), X8 // x[0] MOV 1*8(X7), X11 // x[1] MOV 2*8(X7), X14 // x[2] MOV 3*8(X7), X17 // x[3] MULHU X8, X6, X9 // z_hi[0] = x[0] * y MUL X8, X6, X8 // z_lo[0] = x[0] * y ADD X8, X10, X21 // z_lo[0] = x[0] * y + z[0] SLTU X8, X21, X22 ADD X9, X22, X9 // z_hi[0] = x[0] * y + z[0] ADD X21, X29, X10 // z_lo[0] = x[0] * y + z[0] + c
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Nov 09 13:57:06 UTC 2023 - 2.2K bytes - Viewed (0) -
src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s
VREPF $3, KEY0, X7 VREPF $0, KEY1, X8 VREPF $1, KEY1, X9 VREPF $2, KEY1, X10 VREPF $3, KEY1, X11 VLR CTR, X12 VREPF $1, NONCE, X13 VREPF $2, NONCE, X14 VREPF $3, NONCE, X15 MOVD $(NUM_ROUNDS/2), R1 loop: ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11) ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9) ADD $-1, R1
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:33 UTC 2023 - 5.3K bytes - Viewed (0) -
test/interface/embed3.dir/embed1.go
func (X3) foo(int) {} type X4 struct{ p.M1 } type X5 struct{ p.M1 } func (X5) foo(int) {} type X6 struct{ p.M2 } type X7 struct{ p.M2 } func (X7) foo() {} type X8 struct{ p.M2 } func (X8) foo(int) {} func main() { var i1 interface{} = X1{} check(func() { _ = i1.(p.I1) }, "interface conversion: main.X1 is not p.I1: missing method Foo") var i2 interface{} = X2{}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Nov 06 09:09:59 UTC 2019 - 1.6K bytes - Viewed (0) -
src/cmd/asm/internal/asm/testdata/avx512enc/avx512_vpopcntdq.s
TEXT asmtest_avx512_vpopcntdq(SB), NOSPLIT, $0 VPOPCNTD X12, K2, X8 // 62527d0a55c4 VPOPCNTD X16, K2, X8 // 62327d0a55c0 VPOPCNTD X23, K2, X8 // 62327d0a55c7 VPOPCNTD (R14), K2, X8 // 62527d0a5506 VPOPCNTD -7(DI)(R8*8), K2, X8 // 62327d0a5584c7f9ffffff
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 22 14:57:15 UTC 2018 - 5.5K bytes - Viewed (0)