Sort: Score
Results per page: 10
Languages: All
Results 1 - 10 of 15 for VAND (0.53 sec)
src/internal/bytealg/index_ppc64x.s
VSLDOI $1, V1, V2, V3 // V3=(V1:V2)<<1 VSLDOI $2, V1, V2, V4 // V4=(V1:V2)<<2 VAND V1, SEPMASK, V8 // Mask out sep size 0th index VAND V3, SEPMASK, V9 // Mask out sep size 1st index VAND V4, SEPMASK, V11 // Mask out sep size 2nd index VAND V5, SEPMASK, V12 // Mask out sep size 3rd index #endif VCMPEQUBCC V0, V8, V8 // compare masked string
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 21 16:47:45 UTC 2023 - 31.6K bytes - Viewed (0) -
src/internal/bytealg/indexbyte_arm64.s
// the first bytes and mask off the irrelevant part. VLD1.P (R3), [V1.B16, V2.B16] SUB $0x20, R9, R4 ADDS R4, R2, R2 VCMEQ V0.B16, V1.B16, V3.B16 VCMEQ V0.B16, V2.B16, V4.B16 VAND V5.B16, V3.B16, V3.B16 VAND V5.B16, V4.B16, V4.B16 VADDP V4.B16, V3.B16, V6.B16 // 256->128 VADDP V6.B16, V6.B16, V6.B16 // 128->64 VMOV V6.D[0], R6 // Clear the irrelevant lower bits LSL $1, R9, R4 LSR R4, R6, R6
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Nov 08 20:52:47 UTC 2018 - 3.3K bytes - Viewed (0) -
src/internal/bytealg/count_arm64.s
// Count the target byte in 32-byte chunk chunk_loop: VLD1.P (R0), [V1.B16, V2.B16] CMP R0, R3 VCMEQ V0.B16, V1.B16, V3.B16 VCMEQ V0.B16, V2.B16, V4.B16 // Clear the higher 7 bits VAND V5.B16, V3.B16, V3.B16 VAND V5.B16, V4.B16, V4.B16 // Count lanes match the requested byte VADDP V4.B16, V3.B16, V6.B16 // 32B->16B VUADDLV V6.B16, V7 // Accumulate the count in low 64-bit element of V8 when inside the loop
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Oct 31 17:00:27 UTC 2023 - 2K bytes - Viewed (0) -
src/internal/bytealg/equal_arm64.s
VLD1.P (R1), [V4.D2, V5.D2, V6.D2, V7.D2] VCMEQ V0.D2, V4.D2, V8.D2 VCMEQ V1.D2, V5.D2, V9.D2 VCMEQ V2.D2, V6.D2, V10.D2 VCMEQ V3.D2, V7.D2, V11.D2 VAND V8.B16, V9.B16, V8.B16 VAND V8.B16, V10.B16, V8.B16 VAND V8.B16, V11.B16, V8.B16 CMP R0, R6 VMOV V8.D[0], R4 VMOV V8.D[1], R5 CBZ R4, not_equal CBZ R5, not_equal BNE chunk64_loop AND $0x3f, R2, R2 CBZ R2, equal chunk16:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Jan 24 16:07:25 UTC 2024 - 2.5K bytes - Viewed (0) -
src/cmd/internal/obj/arm64/anames.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 18 01:40:37 UTC 2023 - 5.4K bytes - Viewed (0) -
src/cmd/internal/obj/ppc64/anames.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 01 18:50:29 UTC 2024 - 6.7K bytes - Viewed (0) -
src/hash/crc32/crc32_ppc64le.s
VXOR V0,V1,V0 #ifdef REFLECT VSPLTISB $1,V1 VSL V0,V1,V0 #endif VAND V0,mask_64bit,V0 #ifndef REFLECT VPMSUMD V0,const1,V1 VSLDOI $8,zeroes,V1,V1 VPMSUMD V1,const2,V1 VXOR V0,V1,V0 VSLDOI $8,V0,zeroes,V0 #else VAND V0,mask_32bit,V1 VPMSUMD V1,const1,V1 VAND V1,mask_32bit,V1 VPMSUMD V1,const2,V1 VXOR V0,V1,V0 VSLDOI $4,V0,zeroes,V0 #endif
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 06 12:09:50 UTC 2024 - 13.1K bytes - Viewed (0) -
src/crypto/aes/gcm_arm64.s
AESMC B0.B16, B0.B16 AESE T1.B16, B0.B16 VEOR T2.B16, B0.B16, B0.B16 VREV64 B0.B16, B0.B16 // Multiply by 2 modulo P VMOV B0.D[0], I ASR $63, I VMOV I, T1.D[0] VMOV I, T1.D[1] VAND POLY.B16, T1.B16, T1.B16 VUSHR $63, B0.D2, T2.D2 VEXT $8, ZERO.B16, T2.B16, T2.B16 VSHL $1, B0.D2, B0.D2 VEOR T1.B16, B0.B16, B0.B16 VEOR T2.B16, B0.B16, B0.B16 // Can avoid this when VSLI is available
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 21.5K bytes - Viewed (0) -
src/cmd/asm/internal/asm/testdata/arm64enc.s
AESE V31.B16, V29.B16 // fd4b284e AESIMC V12.B16, V27.B16 // 9b79284e AESMC V14.B16, V28.B16 // dc69284e VAND V4.B16, V4.B16, V9.B16 // 891c244e VCMEQ V24.S4, V13.S4, V12.S4 // ac8db86e VCNT V13.B8, V11.B8 // ab59200e
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jul 24 01:11:41 UTC 2023 - 43.9K bytes - Viewed (0) -
src/crypto/aes/gcm_ppc64x.s
VSPLTISB $7, T2 VOR XC2, T1, XC2 // 0xc2....01 VSPLTB $0, H, T1 // most significant byte VSL H, T0, H // H<<=1 VSRAB T1, T2, T1 // broadcast carry bit VAND T1, XC2, T1 VXOR H, T1, IN // twisted H VSLDOI $8, IN, IN, H // twist even more ... VSLDOI $8, ZERO, XC2, XC2 // 0xc2.0 VSLDOI $8, ZERO, H, HL // ... and split VSLDOI $8, H, ZERO, HH
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 27.1K bytes - Viewed (0)