- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 11 for vspltb (0.12 sec)
-
src/internal/bytealg/index_ppc64x.s
SLD $24, R21 MTVSRD R21, V10 VSPLTW $1, V10, V29 VSLDOI $2, V29, V29, V30 // Mask 0x0000ff000000ff00... MOVD $0xffff, R21 SLD $16, R21 MTVSRD R21, V10 VSPLTW $1, V10, V31 // Mask 0xffff0000ffff0000... VSPLTW $0, V0, V1 // Splat 1st word of separator index4loop: VLOADSWAP(R7, R0, V2, V2) // Load 16 bytes @R7 into V2 next4: VSPLTISB $0, V10 // Clear
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 21 16:47:45 UTC 2023 - 31.6K bytes - Viewed (0) -
src/math/big/arith_ppc64x.s
CMP R8, R4 BGE loopexit // Already at end? // vectorize if len(z) is >=3, else jump to scalar loop CMP R4, $3 BLT scalar MTVSRD R9, VS38 // s VSPLTB $7, V6, V4 MTVSRD R5, VS39 // ŝ VSPLTB $7, V7, V2 ADD $-2, R4, R16 PCALIGN $16 loopback: ADD $-1, R8, R10 SLD $3, R10 LXVD2X (R6)(R10), VS32 // load x[i-1], x[i] SLD $3, R8, R12
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 18:17:17 UTC 2024 - 16.8K bytes - Viewed (0) -
src/crypto/aes/gcm_ppc64x.s
MOVD $0x30, R10 LXVD2X (HTBL)(R0), VH // Load H VSPLTISB $-16, XC2 // 0xf0 VSPLTISB $1, T0 // one VADDUBM XC2, XC2, XC2 // 0xe0 VXOR ZERO, ZERO, ZERO VOR XC2, T0, XC2 // 0xe1 VSLDOI $15, XC2, ZERO, XC2 // 0xe1... VSLDOI $1, ZERO, T0, T1 // ...1 VADDUBM XC2, XC2, XC2 // 0xc2... VSPLTISB $7, T2 VOR XC2, T1, XC2 // 0xc2....01
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 27.1K bytes - Viewed (0) -
src/crypto/aes/asm_ppc64x.s
VXOR IN0, TMP, IN0 // vxor 1,1,6 VSLDOI $12, ZERO, TMP, TMP // vsldoi 6,0,6,12 VXOR IN0, TMP, IN0 // vxor 1,1,6 VSLDOI $8, ZERO, IN1, STAGE // vsldoi 7,0,2,8 VSPLTW $3, IN0, TMP // vspltw 6,1,3 VXOR TMP, IN1, TMP // vxor 6,6,2 VSLDOI $12, ZERO, IN1, IN1 // vsldoi 2,0,2,12 VADDUWM RCON, RCON, RCON // vadduwm 4,4,4 VXOR IN1, TMP, IN1 // vxor 2,2,6
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 20 18:05:32 UTC 2024 - 18.6K bytes - Viewed (0) -
src/cmd/internal/obj/ppc64/a.out.go
AVCMPNEZB AVCMPNEZBCC AVCMPNEB AVCMPNEBCC AVCMPNEH AVCMPNEHCC AVCMPNEW AVCMPNEWCC AVPERM AVPERMXOR AVPERMR AVBPERMQ AVBPERMD AVSEL AVSPLTB AVSPLTH AVSPLTW AVSPLTISB AVSPLTISH AVSPLTISW AVCIPH AVCIPHER AVCIPHERLAST AVNCIPH AVNCIPHER AVNCIPHERLAST AVSBOX AVSHASIGMA AVSHASIGMAW AVSHASIGMAD AVMRGEW
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 01 18:50:29 UTC 2024 - 16K bytes - Viewed (0) -
platforms/native/platform-native/src/main/java/org/gradle/nativeplatform/toolchain/internal/msvcpp/DefaultVisualStudioLocator.java
private VisualCppInstall buildVisualCppInstall(String name, File vsPath, File basePath, VersionNumber version, Compatibility compatibility) { switch (compatibility) { case LEGACY: return buildLegacyVisualCppInstall(name, vsPath, basePath, version); case VS2017_OR_LATER: return buildVisualCppInstall(name, vsPath, basePath, version); default:
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Mon Dec 11 13:37:56 UTC 2023 - 13.9K bytes - Viewed (0) -
src/crypto/sha256/sha256block_ppc64x.s
SLD $6, LEN ADD INP, LEN, END CMP INP, END BEQ end MOVD $·kcon(SB), TBL_STRT MOVD $0x10, R_x010 #ifdef GOARCH_ppc64le MOVWZ $8, TEMP LVSL (TEMP)(R0), LEMASK VSPLTISB $0x0F, KI VXOR KI, LEMASK, LEMASK #endif LXVW4X (CTX)(R_x000), V0 LXVW4X (CTX)(R_x010), V4 // unpack the input values into vector registers VSLDOI $4, V0, V0, V1 VSLDOI $8, V0, V0, V2
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 14.4K bytes - Viewed (0) -
src/cmd/internal/notsha256/sha256block_ppc64x.s
SLD $6, LEN ADD INP, LEN, END CMP INP, END BEQ end MOVD $·kcon(SB), TBL_STRT MOVD $0x10, R_x010 #ifdef GOARCH_ppc64le MOVWZ $8, TEMP LVSL (TEMP)(R0), LEMASK VSPLTISB $0x0F, KI VXOR KI, LEMASK, LEMASK #endif LXVW4X (CTX)(R_x000), V0 LXVW4X (CTX)(R_x010), V4 // unpack the input values into vector registers VSLDOI $4, V0, V0, V1 VSLDOI $8, V0, V0, V2
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:27 UTC 2023 - 14.5K bytes - Viewed (0) -
src/crypto/sha512/sha512block_ppc64x.s
MOVD $0x0e0, R_x0e0 MOVD $0x0f0, R_x0f0 MOVD $0x100, R_x100 MOVD $0x110, R_x110 #ifdef GOARCH_ppc64le // Generate the mask used with VPERM for LE MOVWZ $8, TEMP LVSL (TEMP)(R0), LEMASK VSPLTISB $0x0F, KI VXOR KI, LEMASK, LEMASK #endif LXVD2X (CTX)(R_x000), VS32 // v0 = vs32 LXVD2X (CTX)(R_x010), VS34 // v2 = vs34 LXVD2X (CTX)(R_x020), VS36 // v4 = vs36
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 15.8K bytes - Viewed (0) -
src/hash/crc32/crc32_ppc64le.s
BR startbarConst barcstTable: MOVD $·CastBarConst(SB),R3 startbarConst: LVX (R3),const1 LVX (R3+off16),const2 VSLDOI $8,V0,V0,V1 VXOR V0,V1,V0 #ifdef REFLECT VSPLTISB $1,V1 VSL V0,V1,V0 #endif VAND V0,mask_64bit,V0 #ifndef REFLECT VPMSUMD V0,const1,V1 VSLDOI $8,zeroes,V1,V1 VPMSUMD V1,const2,V1 VXOR V0,V1,V0 VSLDOI $8,V0,zeroes,V0
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 06 12:09:50 UTC 2024 - 13.1K bytes - Viewed (0)