Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 4 of 4 for V9 (0.03 sec)

  1. src/cmd/asm/internal/asm/testdata/arm64.s

    	FMOVD	F1, ZR                          // 3f00669e
    	FMOVS	F1, ZR                          // 3f00261e
    	VUADDW	V9.B8, V12.H8, V14.H8           // 8e11292e
    	VUADDW	V13.H4, V10.S4, V11.S4          // 4b116d2e
    	VUADDW	V21.S2, V24.D2, V29.D2          // 1d13b52e
    	VUADDW2	V9.B16, V12.H8, V14.H8          // 8e11296e
    	VUADDW2	V13.H8, V20.S4, V30.S4          // 9e126d6e
    	VUADDW2	V21.S4, V24.D2, V29.D2          // 1d13b56e
    Registered: Tue Dec 30 11:13:12 UTC 2025
    - Last Modified: Mon Nov 10 17:34:13 UTC 2025
    - 96.1K bytes
    - Viewed (0)
  2. src/cmd/asm/internal/asm/testdata/arm64error.s

    	VST1.P	[V4.S4], 8(R1)                                   // ERROR "invalid post-increment offset"
    	VLD1.P	32(R1), [V8.S4, V9.S4, V10.S4]                   // ERROR "invalid post-increment offset"
    	VLD1.P	48(R1), [V7.S4, V8.S4, V9.S4, V10.S4]            // ERROR "invalid post-increment offset"
    	VPMULL	V1.D1, V2.H4, V3.Q1                              // ERROR "invalid arrangement"
    Registered: Tue Dec 30 11:13:12 UTC 2025
    - Last Modified: Tue Oct 14 19:00:00 UTC 2025
    - 38.4K bytes
    - Viewed (0)
  3. src/cmd/asm/internal/asm/testdata/loong64enc1.s

    	XVMOVQ		X27.V[0], X9    // 69e30377
    
    	// Move vector element to vector.
    	VMOVQ		V1.B[3], V9.B16 // 298cf772
    	VMOVQ		V2.H[2], V8.H8  // 48c8f772
    	VMOVQ		V3.W[1], V7.W4  // 67e4f772
    	VMOVQ		V4.V[0], V6.V2  // 86f0f772
    
    	// Move vector register to vector register.
    	VMOVQ		V1, V9		// 29002d73
    	VMOVQ		V2, V8		// 48002d73
    	XVMOVQ		X3, X7		// 67002d77
    	XVMOVQ		X4, X6		// 86002d77
    
    Registered: Tue Dec 30 11:13:12 UTC 2025
    - Last Modified: Thu Nov 27 00:46:52 UTC 2025
    - 44.5K bytes
    - Viewed (0)
  4. lib/fips140/v1.1.0-rc1.zip

    ) DO4_CIPHER(V0,V1,V2,V3,V8,R4,VCIPHERLAST) XOR_STORE(V9,V0,R5,R0) XOR_STORE(V10,V1,R5,R8) XOR_STORE(V11,V2,R5,R9) XOR_STORE(V12,V3,R5,R10) RET //func ctrBlocks8Asm(nr int, xk *[60]uint32, dst, src *[8 * BlockSize]byte, ivlo, ivhi uint64) TEXT ·ctrBlocks8Asm(SB), NOSPLIT|NOFRAME, $0 CTRBLOCK_PROLOGUE XXLEQV V8, V8, V8 // V8 is -1 VSUBUQM V0, V8, V1 // Vi = IV + i (as IV - (-1)) VADDUQM V8, V8, V9 // V9 is -2 VSUBUQM V0, V9, V2 VSUBUQM V1, V9, V3 VSUBUQM V2, V9, V4 VSUBUQM V3, V9, V5 VSUBUQM V4, V9,...
    Registered: Tue Dec 30 11:13:12 UTC 2025
    - Last Modified: Thu Dec 11 16:27:41 UTC 2025
    - 663K bytes
    - Viewed (0)
Back to top