- Sort: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 33 for V19 (0.02 sec)
-
src/cmd/internal/notsha256/sha256block_ppc64x.s
SHA256ROUND1(V7, V0, V1, V2, V3, V4, V5, V6, V17, V18, V19, V11, V16, R_x090) SHA256ROUND1(V6, V7, V0, V1, V2, V3, V4, V5, V18, V19, V20, V12, V17, R_x0a0) SHA256ROUND1(V5, V6, V7, V0, V1, V2, V3, V4, V19, V20, V21, V13, V18, R_x0b0) SHA256ROUND1(V4, V5, V6, V7, V0, V1, V2, V3, V20, V21, V22, V14, V19, R_x0c0) SHA256ROUND1(V3, V4, V5, V6, V7, V0, V1, V2, V21, V22, V23, V15, V20, R_x0d0)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:27 UTC 2023 - 14.5K bytes - Viewed (0) -
platforms/documentation/docs/src/snippets/native-binaries/google-test/groovy/libs/googleTest/1.7.0/include/gtest/internal/gtest-param-util-generated.h
T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19) {} template <typename T> operator ParamGenerator<T>() const {
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Mon Nov 27 17:53:42 UTC 2023 - 187.7K bytes - Viewed (0) -
src/internal/bytealg/indexbyte_s390x.s
MOVD R3, R8 AND $15, R8 CMPBGT R8, $0, notaligned aligned: ADD R6, R4, R8 MOVD R8, R7 AND $-16, R7 // replicate c across V17 VLVGB $0, R5, V19 VREPB $0, V19, V17 vectorloop: CMPBGE R3, R7, residual VL 0(R3), V16 // load string to be searched into V16 ADD $16, R3 VFEEBS V16, V17, V18 // search V17 in V16 and set conditional code accordingly BVS vectorloop
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Nov 08 20:52:47 UTC 2018 - 2.5K bytes - Viewed (0) -
src/math/big/arith_s390x.s
VACQ V8, V16, V31, V24 VPDI $0x4, V17, V17, V17 // flip the doublewords to big-endian order VPDI $0x4, V18, V18, V18 // flip the doublewords to big-endian order VPDI $0x4, V19, V19, V19 // flip the doublewords to big-endian order VPDI $0x4, V20, V20, V20 // flip the doublewords to big-endian order VPDI $0x4, V21, V21, V21 // flip the doublewords to big-endian order
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:27 UTC 2023 - 20.3K bytes - Viewed (0) -
testing/performance/src/templates/native-dependents-resources/googleTest/libs/googleTest/1.7.0/include/gtest/internal/gtest-param-util-generated.h
T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19) {} template <typename T> operator ParamGenerator<T>() const {
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu Apr 04 07:21:38 UTC 2024 - 187.7K bytes - Viewed (0) -
src/crypto/sha256/sha256block_arm64.s
MOVD p_len+32(FP), R3 // message length VLD1 (R0), [V0.S4, V1.S4] // load h(a,b,c,d,e,f,g,h) VLD1.P 64(R2), [V16.S4, V17.S4, V18.S4, V19.S4] VLD1.P 64(R2), [V20.S4, V21.S4, V22.S4, V23.S4] VLD1.P 64(R2), [V24.S4, V25.S4, V26.S4, V27.S4] VLD1 (R2), [V28.S4, V29.S4, V30.S4, V31.S4] //load 64*4bytes K constant(K0-K63) blockloop:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 5.7K bytes - Viewed (0) -
src/crypto/aes/gcm_ppc64x.s
VCIPHERLAST V18, V23, V18; \ VCIPHERLAST V19, V23, V19; \ VCIPHERLAST V20, V23, V20; \ VCIPHERLAST V21, V23, V21; \ VCIPHERLAST V22, V23, V22; \ XXLXOR V1, V15, V1; \ XXLXOR V2, V16, V2; \ XXLXOR V3, V17, V3; \ XXLXOR V4, V18, V4; \ XXLXOR V5, V19, V5; \ XXLXOR V6, V20, V6; \ XXLXOR V7, V21, V7; \
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 27.1K bytes - Viewed (0) -
src/cmd/internal/obj/arm64/doc.go
VLD1.P (R6)(R11), [V31.D1] <=> ld1 {v31.1d}, [x6], x11 VFMLA V29.S2, V20.S2, V14.S2 <=> fmla v14.2s, v20.2s, v29.2s AESD V22.B16, V19.B16 <=> aesd v19.16b, v22.16b SCVTFWS R3, F16 <=> scvtf s17, w6 6. Align directive Go asm supports the PCALIGN directive, which indicates that the next instruction should be aligned
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 07 00:21:42 UTC 2023 - 9.6K bytes - Viewed (0) -
src/crypto/aes/asm_ppc64x.s
VXOR V13, V13, V13 \ VXOR V14, V14, V14 \ VXOR V15, V15, V15 \ VXOR V16, V16, V16 \ VXOR V17, V17, V17 \ VXOR V18, V18, V18 \ VXOR V19, V19, V19 \ VXOR V20, V20, V20 //func cryptBlocksChain(src, dst *byte, length int, key *uint32, iv *byte, enc int, nr int) TEXT ·cryptBlocksChain(SB), NOSPLIT|NOFRAME, $0 MOVD src+0(FP), INP MOVD dst+8(FP), OUTP MOVD length+16(FP), LEN MOVD key+24(FP), KEYP
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 20 18:05:32 UTC 2024 - 18.6K bytes - Viewed (0) -
src/cmd/asm/internal/asm/testdata/arm64enc.s
UCVTFD R20, F11 // 8b02639e VADD V16, V19, V14 // 6e86f05e VADD V5.H8, V18.H8, V9.H8 // 4986654e VADDP V7.H8, V25.H8, V17.H8 // 31bf674e VADDV V3.H8, V0 // 60b8714e AESD V22.B16, V19.B16 // d35a284e
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jul 24 01:11:41 UTC 2023 - 43.9K bytes - Viewed (0)