- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 8 of 8 for VPXOR (0.07 sec)
-
src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s
VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 VMOVDQA CC3, tmpStoreAVX2
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Nov 29 21:28:33 UTC 2023 - 105.6K bytes - Viewed (0) -
src/crypto/sha512/sha512block_amd64.s
ADDQ R13, R15 ORQ R12, DI ADDQ R14, R11 ADDQ R15, R8 ADDQ R15, R11 ADDQ DI, R11 VPSRLQ $8, Y1, Y2 VPSLLQ $(64-8), Y1, Y1 VPOR Y2, Y1, Y1 VPXOR Y8, Y3, Y3 VPXOR Y1, Y3, Y1 VPADDQ Y1, Y0, Y0 VPERM2F128 $0x0, Y0, Y0, Y4 VPAND MASK_YMM_LO<>(SB), Y0, Y0 VPERM2F128 $0x11, Y7, Y7, Y2 VPSRLQ $6, Y2, Y8 MOVQ R11, DI
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 27K bytes - Viewed (0) -
src/crypto/sha1/sha1block_amd64.s
VPALIGNR $8, REG_SUB_16, REG_SUB_12, REG \ // w[i-14] VPSRLDQ $4, REG_SUB_4, Y0 // w[i-3] #define PRECALC_17(REG_SUB_16,REG_SUB_8,REG) \ VPXOR REG_SUB_8, REG, REG \ VPXOR REG_SUB_16, Y0, Y0 #define PRECALC_18(REG) \ VPXOR Y0, REG, REG \ VPSLLDQ $12, REG, Y9 #define PRECALC_19(REG) \ VPSLLD $1, REG, Y0 \ VPSRLD $31, REG, REG #define PRECALC_20(REG) \ VPOR REG, Y0, Y0 \
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 31.5K bytes - Viewed (0) -
src/runtime/memclr_amd64.s
MOVOU X15, 176(DI) MOVOU X15, 192(DI) MOVOU X15, 208(DI) MOVOU X15, 224(DI) MOVOU X15, 240(DI) SUBQ $256, BX ADDQ $256, DI CMPQ BX, $256 JAE loop JMP tail #endif loop_preheader_avx2: VPXOR X0, X0, X0 // For smaller sizes MOVNTDQ may be faster or slower depending on hardware. // For larger sizes it is always faster, even on dual Xeons with 30M cache.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 10 20:52:34 UTC 2022 - 4.9K bytes - Viewed (0) -
src/crypto/sha256/sha256block_amd64.s
XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 ; \ VPXOR XTMP1, XTMP3, XTMP3; \ RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH ; \ VPXOR XTMP2, XTMP3, XTMP3; \ // XTMP3 = W[-15] ror 7 ^ W[-15] ror 18
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 47.3K bytes - Viewed (0) -
src/cmd/internal/obj/x86/anames.go
"VPTESTNMD", "VPTESTNMQ", "VPTESTNMW", "VPUNPCKHBW", "VPUNPCKHDQ", "VPUNPCKHQDQ", "VPUNPCKHWD", "VPUNPCKLBW", "VPUNPCKLDQ", "VPUNPCKLQDQ", "VPUNPCKLWD", "VPXOR", "VPXORD", "VPXORQ", "VRANGEPD", "VRANGEPS", "VRANGESD", "VRANGESS", "VRCP14PD", "VRCP14PS", "VRCP14SD", "VRCP14SS", "VRCP28PD", "VRCP28PS", "VRCP28SD",
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 11 18:32:50 UTC 2023 - 19.1K bytes - Viewed (0) -
src/cmd/asm/internal/asm/testdata/amd64enc.s
VPXOR X11, X9, X11 // c44131efdb VPXOR (BX), Y15, Y2 // c4e105ef13 or c585ef13 VPXOR (R11), Y15, Y2 // c4c105ef13 VPXOR Y2, Y15, Y2 // c4e105efd2 or c585efd2 VPXOR Y11, Y15, Y2 // c4c105efd3 VPXOR (BX), Y15, Y11 // c46105ef1b or c505ef1b
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Oct 08 21:38:44 UTC 2021 - 581.9K bytes - Viewed (0) -
src/cmd/internal/obj/x86/avx_optabs.go
// // The opcode array in the corresponding Optab entry // should contain the (VEX prefixes, opcode byte) pair // for each of the two forms. // For example, the entries for VPXOR are: // // VPXOR xmm2/m128, xmmV, xmm1 // VEX.NDS.128.66.0F.WIG EF /r // // VPXOR ymm2/m256, ymmV, ymm1 // VEX.NDS.256.66.0F.WIG EF /r // // Produce this optab entry: //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 23 15:34:19 UTC 2018 - 260.3K bytes - Viewed (0)