- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 13 for VSR (0.06 sec)
-
src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/tables.go
{MTVSRBM, 0xfc1f07ff00000000, 0x1010064200000000, 0x0, // Move to VSR Byte Mask VX-form (mtvsrbm VRT,RB) [6]*argField{ap_VecReg_6_10, ap_Reg_16_20}}, {MTVSRBMI, 0xfc00003e00000000, 0x1000001400000000, 0x0, // Move To VSR Byte Mask Immediate DX-form (mtvsrbmi VRT,bm) [6]*argField{ap_VecReg_6_10, ap_ImmUnsigned_16_25_11_15_31_31}}, {MTVSRDM, 0xfc1f07ff00000000, 0x1013064200000000, 0x0, // Move to VSR Doubleword Mask VX-form (mtvsrdm VRT,RB)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 22 17:16:14 UTC 2022 - 334.7K bytes - Viewed (0) -
src/cmd/internal/obj/ppc64/anames.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 01 18:50:29 UTC 2024 - 6.7K bytes - Viewed (0) -
src/cmd/internal/obj/ppc64/a.out.go
REG_CR4EQ REG_CR4SO REG_CR5LT REG_CR5GT REG_CR5EQ REG_CR5SO REG_CR6LT REG_CR6GT REG_CR6EQ REG_CR6SO REG_CR7LT REG_CR7GT REG_CR7EQ REG_CR7SO /* Align FPR and VSR vectors such that when masked with 0x3F they produce an equivalent VSX register. */ /* F0=4160 ... F31=4191 */ REG_F0 REG_F1 REG_F2 REG_F3 REG_F4 REG_F5 REG_F6 REG_F7 REG_F8
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 01 18:50:29 UTC 2024 - 16K bytes - Viewed (0) -
src/cmd/internal/obj/ppc64/asm_test.go
// REG_Rx & 31 == x // REG_Fx & 31 == x // REG_Vx & 31 == x // REG_VSx & 63 == x // REG_SPRx & 1023 == x // REG_CRx & 7 == x // // VR and FPR disjointly overlap VSR, interpreting as VSR registers should produce the correctly overlapped VSR. // REG_FPx & 63 == x // REG_Vx & 63 == x + 32 func TestRegValueAlignment(t *testing.T) { tstFunc := func(rstart, rend, msk, rout int) { for i := rstart; i <= rend; i++ {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Feb 09 22:14:57 UTC 2024 - 17.3K bytes - Viewed (0) -
src/internal/bytealg/equal_ppc64x.s
CMP R5, $16 // Use GPR checks for len <= 16 BLE check0_16 MOVD $0, R3 // Assume no-match in case BGELR CR6 returns CMP R5, $32 // Use overlapping VSX loads for len <= 32 BLE check17_32 // Do a pair of overlapping VSR compares CMP R5, $64 BLE check33_64 // Hybrid check + overlap compare. setup64: SRD $6, R5, R6 // number of 64 byte chunks to compare MOVD R6, CTR MOVD $16, R14 // index for VSX loads and stores MOVD $32, R15
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 21 16:47:45 UTC 2023 - 4.9K bytes - Viewed (0) -
src/internal/bytealg/indexbyte_ppc64x.s
LXVD2X (R0)(R9),V2 VCMPEQUBCC V2,V1,V6 BNE CR6,foundat0 // Match found at R8+48 bytes, jump out BR notfound cmp8: // Length 8 - 15 #ifdef GOPPC64_power10 // Load all the bytes into a single VSR in BE order. SLD $56,R4,R5 LXVLL R3,R5,V2 // Compare and count the number which don't match. VCMPEQUB V2,V1,V6 VCLZLSBB V6,R3 // If count is the number of bytes, or more. No matches are found. CMPU R3,R4
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 21 16:10:29 UTC 2023 - 6.3K bytes - Viewed (0) -
src/cmd/internal/obj/ppc64/asm9.go
{as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */ /* VSX move from VSR */ {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4}, {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4}, /* VSX move to VSR */ {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4}, {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 13:55:28 UTC 2024 - 156.1K bytes - Viewed (0) -
src/crypto/aes/asm_ppc64x.s
VXOR IN0, KEY, IN0 // vxor 1,1,3 STXVD2X IN0, (R0+OUTENC) STXVD2X IN0, (R0+OUTDEC) RET l192: LXSDX (INP+R0), IN1 // Load next 8 bytes into upper half of VSR. XXBRD_ON_LE(IN1, IN1) // and convert to BE ordering on LE hosts. MOVD $4, CNT // li 7,4 STXVD2X IN0, (R0+OUTENC) STXVD2X IN0, (R0+OUTDEC) ADD $16, OUTENC, OUTENC
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 20 18:05:32 UTC 2024 - 18.6K bytes - Viewed (0) -
src/crypto/internal/nistec/p256_asm_ppc64le.s
VSEL T1, X1, SEL1, T1 \ VSEL T2, ZER, SEL1, T2 \ \ VSLDOI $15, T2, ZER, TT1 \ VSLDOI $15, T1, ZER, TT0 \ VSPLTISB $1, SEL1 \ VSR T0, SEL1, T0 \ // VSRL VSR T1, SEL1, T1 \ VSPLTISB $7, SEL1 \ // VREPIB VSL TT0, SEL1, TT0 \ VSL TT1, SEL1, TT1 \ VOR T0, TT0, T0 \ VOR T1, TT1, T1
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 56.5K bytes - Viewed (0) -
src/cmd/asm/internal/asm/testdata/ppc64.s
VSRB V1, V2, V3 // 10611204 VSRH V1, V2, V3 // 10611244 VSRW V1, V2, V3 // 10611284 VSRD V1, V2, V3 // 106116c4 VSR V1, V2, V3 // 106112c4 VSRO V1, V2, V3 // 1061144c VSLD V1, V2, V3 // 106115c4 VSRAB V1, V2, V3 // 10611304 VSRAH V1, V2, V3 // 10611344
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 21:53:50 UTC 2024 - 50.2K bytes - Viewed (0)