Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 6 of 6 for ADDL (0.04 sec)

  1. src/crypto/sha256/sha256block_amd64.s

    	MOVQ dig+0(FP), BP
    	ADDL (0*4)(BP), R8  // H0 = a + H0
    	MOVL R8, (0*4)(BP)
    	ADDL (1*4)(BP), R9  // H1 = b + H1
    	MOVL R9, (1*4)(BP)
    	ADDL (2*4)(BP), R10 // H2 = c + H2
    	MOVL R10, (2*4)(BP)
    	ADDL (3*4)(BP), R11 // H3 = d + H3
    	MOVL R11, (3*4)(BP)
    	ADDL (4*4)(BP), R12 // H4 = e + H4
    	MOVL R12, (4*4)(BP)
    	ADDL (5*4)(BP), R13 // H5 = f + H5
    	MOVL R13, (5*4)(BP)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 47.3K bytes
    - Viewed (0)
  2. src/crypto/sha1/sha1block_amd64.s

    	ROUND4(AX, BX, CX, DX, BP, 75)
    	ROUND4(BP, AX, BX, CX, DX, 76)
    	ROUND4(DX, BP, AX, BX, CX, 77)
    	ROUND4(CX, DX, BP, AX, BX, 78)
    	ROUND4(BX, CX, DX, BP, AX, 79)
    
    	ADDL	R11, AX
    	ADDL	R12, BX
    	ADDL	R13, CX
    	ADDL	R14, DX
    	ADDL	R15, BP
    
    	ADDQ	$64, SI
    	CMPQ	SI, DI
    	JB	loop
    
    end:
    	MOVQ	dig+0(FP), DI
    	MOVL	AX, (0*4)(DI)
    	MOVL	BX, (1*4)(DI)
    	MOVL	CX, (2*4)(DI)
    	MOVL	DX, (3*4)(DI)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 31.5K bytes
    - Viewed (0)
  3. test/codegen/arithmetic.go

    	// amd64:`SUBQ\s[A-Z]+,\s24\([A-Z]+\)`
    	arr[3] -= b
    	// 386:`DECL\s16\([A-Z]+\)`
    	arr[4]--
    	// 386:`ADDL\s[$]-20,\s20\([A-Z]+\)`
    	arr[5] -= 20
    	// 386:`SUBL\s\([A-Z]+\)\([A-Z]+\*4\),\s[A-Z]+`
    	ef -= arr[b]
    	// 386:`SUBL\s[A-Z]+,\s\([A-Z]+\)\([A-Z]+\*4\)`
    	arr[c] -= b
    	// 386:`ADDL\s[$]-15,\s\([A-Z]+\)\([A-Z]+\*4\)`
    	arr[d] -= 15
    	// 386:`DECL\s\([A-Z]+\)\([A-Z]+\*4\)`
    	arr[b]--
    	// amd64:`DECQ\s64\([A-Z]+\)`
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 15:28:00 UTC 2024
    - 15.2K bytes
    - Viewed (0)
  4. src/runtime/asm_386.s

    	/* copy return values back */		\
    	MOVL	stackArgsType+0(FP), DX;		\
    	MOVL	stackArgs+8(FP), DI;		\
    	MOVL	stackArgsSize+12(FP), CX;		\
    	MOVL	stackRetOffset+16(FP), BX;		\
    	MOVL	SP, SI;				\
    	ADDL	BX, DI;				\
    	ADDL	BX, SI;				\
    	SUBL	BX, CX;				\
    	CALL	callRet<>(SB);			\
    	RET
    
    // callRet copies return values back at the end of call*. This is a
    // separate function so it can allocate stack space for the arguments
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Mar 15 15:45:13 UTC 2024
    - 43.1K bytes
    - Viewed (0)
  5. src/crypto/aes/gcm_amd64.s

    #define ctx DX
    #define ctrPtr CX
    #define ptx SI
    #define ks AX
    #define tPtr R8
    #define ptxLen R9
    #define aluCTR R10
    #define aluTMP R11
    #define aluK R12
    #define NR R13
    
    #define increment(i) ADDL $1, aluCTR; MOVL aluCTR, aluTMP; XORL aluK, aluTMP; BSWAPL aluTMP; MOVL aluTMP, (3*4 + 8*16 + i*16)(SP)
    #define aesRnd(k) AESENC k, B0; AESENC k, B1; AESENC k, B2; AESENC k, B3; AESENC k, B4; AESENC k, B5; AESENC k, B6; AESENC k, B7
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 23.4K bytes
    - Viewed (0)
  6. src/runtime/race_amd64.s

    // Add
    TEXT	sync∕atomic·AddInt32(SB), NOSPLIT|NOFRAME, $0-20
    	GO_ARGS
    	MOVQ	$__tsan_go_atomic32_fetch_add(SB), AX
    	CALL	racecallatomic<>(SB)
    	MOVL	add+8(FP), AX	// convert fetch_add to add_fetch
    	ADDL	AX, ret+16(FP)
    	RET
    
    TEXT	sync∕atomic·AddInt64(SB), NOSPLIT|NOFRAME, $0-24
    	GO_ARGS
    	MOVQ	$__tsan_go_atomic64_fetch_add(SB), AX
    	CALL	racecallatomic<>(SB)
    	MOVQ	add+8(FP), AX	// convert fetch_add to add_fetch
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 18:37:29 UTC 2024
    - 15.1K bytes
    - Viewed (0)
Back to top