- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 6 of 6 for 16B (0.04 sec)
-
src/internal/bytealg/indexbyte_ppc64x.s
#ifndef GOPPC64_power9 #define ADJUST_FOR_CNTLZW -16 #else #define ADJUST_FOR_CNTLZW 0 #endif // Now, find the index of the 16B vector the match was discovered in. If CNTLZW is used // to determine the offset into the 16B vector, it will overcount by 16. Account for it here. foundat3: SUB R3,R8,R3 ADD $48+ADJUST_FOR_CNTLZW,R3 BR vfound foundat2: SUB R3,R8,R3
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 21 16:10:29 UTC 2023 - 6.3K bytes - Viewed (0) -
src/cmd/internal/obj/arm64/doc.go
VLD1.P (R6)(R11), [V31.D1] <=> ld1 {v31.1d}, [x6], x11 VFMLA V29.S2, V20.S2, V14.S2 <=> fmla v14.2s, v20.2s, v29.2s AESD V22.B16, V19.B16 <=> aesd v19.16b, v22.16b SCVTFWS R3, F16 <=> scvtf s17, w6 6. Align directive Go asm supports the PCALIGN directive, which indicates that the next instruction should be aligned
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 07 00:21:42 UTC 2023 - 9.6K bytes - Viewed (0) -
src/internal/bytealg/count_arm64.s
VCMEQ V0.B16, V2.B16, V4.B16 // Clear the higher 7 bits VAND V5.B16, V3.B16, V3.B16 VAND V5.B16, V4.B16, V4.B16 // Count lanes match the requested byte VADDP V4.B16, V3.B16, V6.B16 // 32B->16B VUADDLV V6.B16, V7 // Accumulate the count in low 64-bit element of V8 when inside the loop VADD V7, V8 BNE chunk_loop VMOV V8.D[0], R6 ADD R6, R11, R11 CBZ R2, done tail:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Oct 31 17:00:27 UTC 2023 - 2K bytes - Viewed (0) -
src/internal/bytealg/count_ppc64x.s
ADD R14, R18, R18 ADD $16, R3, R3 ANDCC $15, R4, R4 small_tail_p10: SLD $56, R4, R6 LXVLL R3, R6, V0 VCMPEQUB V0, V1, V0 VCLRRB V0, R4, V0 // If <16B being compared, clear matches of the 16-R4 bytes. VCNTMBB V0, $1, R14 // Sum the value of bit 0 of each byte of the compare into R14. SRD $56, R14, R14 // The result of VCNTMBB is shifted. Unshift it. ADD R14, R18, R3 RET
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 14 20:30:44 UTC 2023 - 3.6K bytes - Viewed (0) -
src/runtime/cgo/abi_ppc64x.h
FMOVD (offset+8*14)(R1), F28 \ FMOVD (offset+8*15)(R1), F29 \ FMOVD (offset+8*16)(R1), F30 \ FMOVD (offset+8*17)(R1), F31 // Save and restore VR20-31 (aka VSR56-63). These // macros must point to a 16B aligned offset. #define SAVE_VR_SIZE (12*16) #define SAVE_VR(offset, rtmp) \ MOVD $(offset+16*0), rtmp \ STVX V20, (rtmp)(R1) \ MOVD $(offset+16*1), rtmp \ STVX V21, (rtmp)(R1) \
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 03 20:17:02 UTC 2023 - 6.6K bytes - Viewed (0) -
src/internal/bytealg/compare_ppc64x.s
PCALIGN $16 cmp64_tail_gt16: // 17 - 32B LXVD2X (R0)(R5),V3 LXVD2X (R0)(R6),V4 VCMPEQUDCC V3,V4,V1 BGE CR6,different BR cmp64_tail_gt0 PCALIGN $16 cmp64_tail_gt0: // 1 - 16B LXVD2X (R5)(R9),V3 LXVD2X (R6)(R9),V4 VCMPEQUDCC V3,V4,V1 BGE CR6,different RET PCALIGN $16 cmp32: // 32 - 63B ANDCC $31,R9,R9 LXVD2X (R0)(R5),V3 LXVD2X (R0)(R6),V4
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 28 17:33:20 UTC 2023 - 6.7K bytes - Viewed (0)