Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 8 of 8 for LVX (0.02 sec)

  1. src/hash/crc32/crc32_ppc64le.s

    	BC	18,0,v4
    
    	LVX	(R4+off80),V5
    	LVX	(R3+off80),V17
    	VPMSUMW	V5,V17,V5
    	BC	18,0,v5
    
    	LVX	(R4+off96),V6
    	LVX	(R3+off96),V16
    	VPMSUMW	V6,V16,V6
    	BC	18,0,v6
    
    	LVX	(R4+off112),V7
    	LVX	(R3+off112),V17
    	VPMSUMW	V7,V17,V7
    	BC	18,0,v7
    
    	ADD	$128,R3
    	ADD	$128,R4
    
    	LVX	(R4),V8
    	LVX	(R3),V16
    	VPMSUMW	V8,V16,V8
    	BC	18,0,v8
    
    	LVX	(R4+off16),V9
    	LVX	(R3+off16),V17
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 06 12:09:50 UTC 2024
    - 13.1K bytes
    - Viewed (0)
  2. src/crypto/aes/asm_ppc64x.s

    #ifdef NEEDS_ESPERM
    	MOVD	$·rcon(SB), PTR // PTR points to rcon addr
    	LVX	(PTR), ESPERM
    	ADD	$0x10, PTR
    #else
    	MOVD	$·rcon+0x10(SB), PTR // PTR points to rcon addr (skipping permute vector)
    #endif
    
    	// Get key from memory and write aligned into VR
    	P8_LXVB16X(INP, R0, IN0)
    	ADD	$0x10, INP, INP
    	MOVD	$0x20, TEMP
    
    	CMPW	ROUNDS, $12
    	LVX	(PTR)(R0), RCON    // lvx   4,0,6      Load first 16 bytes into RCON
    	LVX	(PTR)(TEMP), MASK
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 20 18:05:32 UTC 2024
    - 18.6K bytes
    - Viewed (0)
  3. src/cmd/internal/notsha256/sha256block_ppc64x.s

    loop:
    	MOVD	TBL_STRT, TBL
    	LVX	(TBL)(R_x000), KI
    
    	LXVD2X	(INP)(R_x000), V8 // load v8 in advance
    
    	// Offload to VSR24-31 (aka FPR24-31)
    	XXLOR	V0, V0, VS24
    	XXLOR	V1, V1, VS25
    	XXLOR	V2, V2, VS26
    	XXLOR	V3, V3, VS27
    	XXLOR	V4, V4, VS28
    	XXLOR	V5, V5, VS29
    	XXLOR	V6, V6, VS30
    	XXLOR	V7, V7, VS31
    
    	VADDUWM	KI, V7, V7        // h+K[i]
    	LVX	(TBL)(R_x010), KI
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Oct 19 23:33:27 UTC 2023
    - 14.5K bytes
    - Viewed (0)
  4. src/crypto/sha256/sha256block_ppc64x.s

    loop:
    	MOVD	TBL_STRT, TBL
    	LVX	(TBL)(R_x000), KI
    
    	LXVD2X	(INP)(R_x000), V8 // load v8 in advance
    
    	// Offload to VSR24-31 (aka FPR24-31)
    	XXLOR	V0, V0, VS24
    	XXLOR	V1, V1, VS25
    	XXLOR	V2, V2, VS26
    	XXLOR	V3, V3, VS27
    	XXLOR	V4, V4, VS28
    	XXLOR	V5, V5, VS29
    	XXLOR	V6, V6, VS30
    	XXLOR	V7, V7, VS31
    
    	VADDUWM	KI, V7, V7        // h+K[i]
    	LVX	(TBL)(R_x010), KI
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 14.4K bytes
    - Viewed (0)
  5. src/crypto/sha512/sha512block_ppc64x.s

    	VSHASIGMAD	$0, a, $1, S0; \
    	VADDUDM		FUNC, h, h; \
    	VXOR		b, a, FUNC; \
    	VADDUDM		S1, h, h; \
    	VSEL		b, c, FUNC, FUNC; \
    	VADDUDM		KI, g, g; \
    	VADDUDM		h, d, d; \
    	VADDUDM		FUNC, S0, S0; \
    	LVX		(TBL)(idx), KI; \
    	VADDUDM		S0, h, h
    
    #define SHA512ROUND1(a, b, c, d, e, f, g, h, xi, xj, xj_1, xj_9, xj_14, idx) \
    	VSHASIGMAD	$0, xj_1, $0, s0; \
    	VSEL		g, f, e, FUNC; \
    	VSHASIGMAD	$15, e, $1, S1; \
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 15.8K bytes
    - Viewed (0)
  6. src/runtime/asm_ppc64x.s

    	MOVD	$-192, R12
    	LVX	(R0+R12), V20
    	MOVD	$-176, R12
    	LVX	(R0+R12), V21
    	MOVD	$-160, R12
    	LVX	(R0+R12), V22
    	MOVD	$-144, R12
    	LVX	(R0+R12), V23
    	MOVD	$-128, R12
    	LVX	(R0+R12), V24
    	MOVD	$-112, R12
    	LVX	(R0+R12), V25
    	MOVD	$-96, R12
    	LVX	(R0+R12), V26
    	MOVD	$-80, R12
    	LVX	(R0+R12), V27
    	MOVD	$-64, R12
    	LVX	(R0+R12), V28
    	MOVD	$-48, R12
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 18:17:17 UTC 2024
    - 45.4K bytes
    - Viewed (0)
  7. src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/plan9.go

    		if args[1] == "0" {
    			return op + " (" + args[2] + ")," + args[0]
    		}
    		return op + " (" + args[2] + ")(" + args[1] + ")," + args[0]
    
    	case LXVX, LXVD2X, LXVW4X, LXVH8X, LXVB16X, LVX, LVXL, LVSR, LVSL, LVEBX, LVEHX, LVEWX, LXSDX, LXSIWAX:
    		return op + " (" + args[2] + ")(" + args[1] + ")," + args[0]
    
    	case LXV:
    		return op + " " + args[1] + "," + args[0]
    
    	case LXVL, LXVLL:
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Nov 22 17:16:14 UTC 2022
    - 10.9K bytes
    - Viewed (0)
  8. src/crypto/aes/gcm_ppc64x.s

    	MOVD    in+32(FP), BLK_INP
    	MOVD    in_len+40(FP), IN_LEN
    	MOVD    counter+56(FP), COUNTER
    	MOVD    key+64(FP), BLK_KEY
    
    // Set up permute string when needed.
    #ifdef NEEDS_ESPERM
    	MOVD    $·rcon(SB), R14
    	LVX     (R14), ESPERM   // Permute value for P8_ macros.
    #endif
    	SETUP_COUNTER		// V30 Counter V31 BE {0, 0, 0, 1}
    	LOAD_KEYS(BLK_KEY, KEY_LEN)	// VS1 - VS10/12/14 based on keysize
    	CMP     IN_LEN, $128
    	BLT	block64
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 27.1K bytes
    - Viewed (0)
Back to top