Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 7 of 7 for VSLDOI (0.13 sec)

  1. src/crypto/aes/asm_ppc64x.s

    	VPERM	IN1, IN1, MASK, KEY // vperm 3,2,2,5
    	VSLDOI	$12, ZERO, IN0, TMP // vsldoi 6,0,1,12
    	VCIPHERLAST	KEY, RCON, KEY      // vcipherlast 3,3,4
    
    	VXOR	IN0, TMP, IN0       // vxor 1,1,6
    	VSLDOI	$12, ZERO, TMP, TMP // vsldoi 6,0,6,12
    	VXOR	IN0, TMP, IN0       // vxor 1,1,6
    	VSLDOI	$12, ZERO, TMP, TMP // vsldoi 6,0,6,12
    	VXOR	IN0, TMP, IN0       // vxor 1,1,6
    
    	VSLDOI	$8, ZERO, IN1, STAGE  // vsldoi 7,0,2,8
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 20 18:05:32 UTC 2024
    - 18.6K bytes
    - Viewed (0)
  2. src/cmd/internal/notsha256/sha256block_ppc64x.s

    	VXOR	KI, LEMASK, LEMASK
    #endif
    
    	LXVW4X	(CTX)(R_x000), V0
    	LXVW4X	(CTX)(R_x010), V4
    
    	// unpack the input values into vector registers
    	VSLDOI	$4, V0, V0, V1
    	VSLDOI	$8, V0, V0, V2
    	VSLDOI	$12, V0, V0, V3
    	VSLDOI	$4, V4, V4, V5
    	VSLDOI	$8, V4, V4, V6
    	VSLDOI	$12, V4, V4, V7
    
    	MOVD	$0x020, R_x020
    	MOVD	$0x030, R_x030
    	MOVD	$0x040, R_x040
    	MOVD	$0x050, R_x050
    	MOVD	$0x060, R_x060
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Oct 19 23:33:27 UTC 2023
    - 14.5K bytes
    - Viewed (0)
  3. src/internal/bytealg/index_ppc64x.s

    	MOVD     $3, R9             // Number of bytes beyond 16
    	VLOADSWAP(R7, R9, V3, V3)   // Load 16 bytes @R7+3 into V3
    	VSLDOI   $13, V3, V10, V3   // Shift left last 3 bytes
    	VSLDOI   $1, V2, V3, V4     // V4=(V2:V3)<<1
    	VSLDOI   $2, V2, V3, V9     // V9=(V2:V3)<<2
    	VSLDOI   $3, V2, V3, V10    // V10=(V2:v3)<<3
    	VCMPEQUW V1, V2, V5         // compare index 0, 4, ... with sep
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Apr 21 16:47:45 UTC 2023
    - 31.6K bytes
    - Viewed (0)
  4. src/crypto/aes/gcm_ppc64x.s

    	VSLDOI $8, XM, ZERO, T0
    	VSLDOI $8, ZERO, XM, T1
    	VSLDOI $8, XM1, ZERO, HL
    	VSLDOI $8, ZERO, XM1, H
    	VXOR   XL, T0, XL
    	VXOR   XH, T1, XH
    	VXOR   XL1, HL, XL1
    	VXOR   XH1, H, XH1
    
    	VSLDOI $8, XL, XL, XL
    	VSLDOI $8, XL1, XL1, XL1
    	VXOR   XL, T2, XL
    	VXOR   XL1, HH, XL1
    
    	VSLDOI  $8, XL, XL, T1  // 2nd reduction phase
    	VSLDOI  $8, XL1, XL1, H // 2nd reduction phase
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 27.1K bytes
    - Viewed (0)
  5. src/crypto/sha256/sha256block_ppc64x.s

    	VXOR	KI, LEMASK, LEMASK
    #endif
    
    	LXVW4X	(CTX)(R_x000), V0
    	LXVW4X	(CTX)(R_x010), V4
    
    	// unpack the input values into vector registers
    	VSLDOI	$4, V0, V0, V1
    	VSLDOI	$8, V0, V0, V2
    	VSLDOI	$12, V0, V0, V3
    	VSLDOI	$4, V4, V4, V5
    	VSLDOI	$8, V4, V4, V6
    	VSLDOI	$12, V4, V4, V7
    
    	MOVD	$0x020, R_x020
    	MOVD	$0x030, R_x030
    	MOVD	$0x040, R_x040
    	MOVD	$0x050, R_x050
    	MOVD	$0x060, R_x060
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 14.4K bytes
    - Viewed (0)
  6. src/crypto/sha512/sha512block_ppc64x.s

    	LXVD2X	(CTX)(R_x010), VS34	// v2 = vs34
    	LXVD2X	(CTX)(R_x020), VS36	// v4 = vs36
    
    	// unpack the input values into vector registers
    	VSLDOI	$8, V0, V0, V1
    	LXVD2X	(CTX)(R_x030), VS38	// v6 = vs38
    	VSLDOI	$8, V2, V2, V3
    	VSLDOI	$8, V4, V4, V5
    	VSLDOI	$8, V6, V6, V7
    
    loop:
    	MOVD	TBL_STRT, TBL
    	LVX	(TBL)(R_x000), KI
    
    	LXVD2X	(INP)(R0), VS40	// load v8 (=vs40) in advance
    	ADD	$16, INP
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 15.8K bytes
    - Viewed (0)
  7. src/hash/crc32/crc32_ppc64le.s

    	VXOR    V4,V12,V4
    	VXOR    V5,V13,V5
    	VXOR    V6,V14,V6
    	VXOR    V7,V15,V7
    
    #ifdef REFLECT
    	VSLDOI  $4,V0,zeroes,V0
    	VSLDOI  $4,V1,zeroes,V1
    	VSLDOI  $4,V2,zeroes,V2
    	VSLDOI  $4,V3,zeroes,V3
    	VSLDOI  $4,V4,zeroes,V4
    	VSLDOI  $4,V5,zeroes,V5
    	VSLDOI  $4,V6,zeroes,V6
    	VSLDOI  $4,V7,zeroes,V7
    #endif
    
    	LVX	(R4),V8
    	LVX	(R4+off16),V9
    	LVX	(R4+off32),V10
    	LVX	(R4+off48),V11
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 06 12:09:50 UTC 2024
    - 13.1K bytes
    - Viewed (0)
Back to top