Results 11 - 20 of 39 for r13 (0.02 sec)

  1. src/runtime/race_amd64.s

    	MOVQ	g(R12), R14
    	MOVQ	g_m(R14), R13
    	MOVQ	m_g0(R13), R15
    	CMPQ	R13, R15
    	JEQ	noswitch	// branch if already on g0
    	MOVQ	R15, g(R12)	// g = m->g0
    	MOVQ	R15, R14	// set g register
    	PUSHQ	RARG1	// func arg
    	PUSHQ	RARG0	// func arg
    	CALL	runtime·racecallback(SB)
    	POPQ	R12
    	POPQ	R12
    	// All registers are smashed after Go code, reload.
    	get_tls(R12)
    	MOVQ	g(R12), R13
    	MOVQ	g_m(R13), R13
    - Last Modified: Fri May 17 18:37:29 UTC 2024
    - 15.1K bytes
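
    Note: this excerpt is the g-to-g0 switch inside runtime·racecallbackthunk, which lets the C race runtime call back into Go on the scheduling stack. Below is a rough Go-level sketch of that control flow; the stub g/m types, the `current` variable, and `callbackOnG0` are illustrative stand-ins for the real runtime internals, not the runtime's code.

        package main

        import "fmt"

        // Stub g/m types; the fields mirror the g_m, m_g0 and m_curg
        // offsets the assembly dereferences.
        type m struct {
            g0   *g // scheduling goroutine, runs on the system stack
            curg *g // current user goroutine
        }
        type g struct{ m *m }

        // current models the g register / TLS slot (g(R12) on amd64).
        var current *g

        // callbackOnG0 sketches the thunk: switch to g0, run the Go
        // callback, then reload g from m because Go code smashed registers.
        func callbackOnG0(callback func()) {
            gp := current
            if g0 := gp.m.g0; gp != g0 {
                current = g0 // g = m->g0
            }
            callback()               // CALL runtime·racecallback(SB)
            current = current.m.curg // reload: g = m->curg
        }

        func main() {
            mm := &m{}
            mm.g0, mm.curg = &g{m: mm}, &g{m: mm}
            current = mm.curg
            callbackOnG0(func() { fmt.Println("on g0") })
        }
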
  2. src/runtime/race_arm64.s

    	// load_g will clobber R0, Save R0
    	MOVD	R0, R13
    	load_g
    	// restore R0
    	MOVD	R13, R0
    	MOVD	g_m(g), R13
    	MOVD	m_g0(R13), R14
    	CMP	R14, g
    	BEQ	noswitch	// branch if already on g0
    	MOVD	R14, g
    
    	MOVD	R0, 8(RSP)	// func arg
    	MOVD	R1, 16(RSP)	// func arg
    	BL	runtime·racecallback(SB)
    
    	// All registers are smashed after Go code, reload.
    	MOVD	g_m(g), R13
    	MOVD	m_curg(R13), g	// g = m->curg
    ret:
    - Last Modified: Fri May 17 18:37:29 UTC 2024
    - 15.5K bytes
  3. src/crypto/md5/md5block_arm.s

    	BEQ	aligned			// aligned detected - skip copy
    
    	// Copy the unaligned source data into the aligned temporary buffer
    	// memmove(to=4(R13), from=8(R13), n=12(R13)) - Corrupts all registers
    	MOVW	$buf, Rtable	// to
    	MOVW	$64, Rc0		// n
    	MOVM.IB	[Rtable,Rdata,Rc0], (R13)
    	BL	runtime·memmove(SB)
    
    	// Point to the local aligned copy of the data
    	MOVW	$buf, Rdata
    
    aligned:
    	// Point to the table of constants
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 8.8K bytes
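
    Note: per the "aligned detected - skip copy" comment, the ARM code stages each 64-byte block through an aligned local buffer via runtime·memmove whenever the source is unaligned. A hedged Go sketch of that check-and-copy; `alignBlock` and the 4-byte alignment test are my illustration, not the file's API.

        package main

        import (
            "fmt"
            "unsafe"
        )

        // alignBlock returns a word-aligned view of the next 64-byte block,
        // copying through buf only when the source address is unaligned.
        func alignBlock(p []byte, buf *[64]byte) []byte {
            if uintptr(unsafe.Pointer(&p[0]))&3 == 0 {
                return p[:64] // aligned: hash in place
            }
            copy(buf[:], p[:64]) // unaligned: stage through the local buffer
            return buf[:]
        }

        func main() {
            var buf [64]byte
            data := make([]byte, 65)
            fmt.Println(len(alignBlock(data[1:], &buf))) // 64 either way
        }
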
  4. src/crypto/md5/md5block_arm64.s

    	MOVD	dig+0(FP), R0
    	MOVD	p+8(FP), R1
    	MOVD	p_len+16(FP), R2
    	AND	$~63, R2
    	CBZ	R2, zero
    
    	ADD	R1, R2, R21
    	LDPW	(0*8)(R0), (R4, R5)
    	LDPW	(1*8)(R0), (R6, R7)
    
    loop:
    	MOVW	R4, R12
    	MOVW	R5, R13
    	MOVW	R6, R14
    	MOVW	R7, R15
    
    	MOVW	(0*4)(R1), R8
    	MOVW	R7, R9
    
    #define ROUND1(a, b, c, d, index, const, shift) \
    	ADDW	$const, a; \
    	ADDW	R8, a; \
    	MOVW	(index*4)(R1), R8; \
    	EORW	c, R9; \
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 4.1K bytes
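
    Note: the ROUND1 macro is one step of MD5's first round. In Go terms it looks like the sketch below; `round1` is my name, and bits.RotateLeft32 stands in for the rotate the macro performs.

        package main

        import (
            "fmt"
            "math/bits"
        )

        // round1 is one MD5 round-1 step: a += F(b,c,d) + x + k, rotate
        // left by s, then add b. F(b,c,d) = (b AND c) OR (NOT b AND d);
        // the macro's EORW/ANDW/EORW sequence computes the equivalent
        // d ^ (b & (c ^ d)) without needing a NOT.
        func round1(a, b, c, d, x, k uint32, s int) uint32 {
            f := d ^ (b & (c ^ d))
            return b + bits.RotateLeft32(a+f+x+k, s)
        }

        func main() {
            // First step of a block: standard MD5 initial state, K[0], shift 7.
            fmt.Printf("%#x\n", round1(0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0, 0xd76aa478, 7))
        }
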
  5. src/internal/bytealg/compare_loong64.s

    	// R4 = a_base
    	// R5 = a_len
    	// R6 = b_base
    	// R7 = b_len
    	JMP	cmpbody<>(SB)
    
    // On entry:
    // R5 length of a
    // R7 length of b
    // R4 points to the start of a
    // R6 points to the start of b
    // R13 points to the return value (-1/0/1)
    TEXT cmpbody<>(SB),NOSPLIT|NOFRAME,$0
    	BEQ	R4, R6, samebytes // same start of a and b
    
    	SGTU	R5, R7, R9
    	BNE	R0, R9, r2_lt_r1
    	MOVV	R5, R14
    	JMP	entry
    r2_lt_r1:
    - Last Modified: Mon May 13 15:04:25 UTC 2024
    - 1.7K bytes
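
    Note: the header comment describes a three-way byte comparison. A plain-Go rendering of cmpbody's observable contract follows; this is effectively what bytes.Compare returns, and `threeWayCompare` is an illustrative name of mine.

        package main

        import "fmt"

        // threeWayCompare returns -1/0/1: the common prefix decides first,
        // then the shorter slice sorts first (the "samebytes" fallback).
        func threeWayCompare(a, b []byte) int {
            n := len(a)
            if len(b) < n {
                n = len(b)
            }
            for i := 0; i < n; i++ {
                if a[i] != b[i] {
                    if a[i] < b[i] {
                        return -1
                    }
                    return 1
                }
            }
            switch {
            case len(a) < len(b):
                return -1
            case len(a) > len(b):
                return 1
            }
            return 0
        }

        func main() {
            fmt.Println(threeWayCompare([]byte("abc"), []byte("abd"))) // -1
        }
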
  6. src/crypto/md5/md5block_amd64.s

    	LEAQ	(SI)(DX*1),	DI
    	MOVL	(0*4)(BP),	AX
    	MOVL	(1*4)(BP),	BX
    	MOVL	(2*4)(BP),	CX
    	MOVL	(3*4)(BP),	DX
    	MOVL	$0xffffffff,	R11
    
    	CMPQ	SI,		DI
    	JEQ	end
    
    loop:
    	MOVL	AX,		R12
    	MOVL	BX,		R13
    	MOVL	CX,		R14
    	MOVL	DX,		R15
    
    	MOVL	(0*4)(SI),	R8
    	MOVL	DX,		R9
    
    #define ROUND1(a, b, c, d, index, const, shift) \
    	XORL	c, R9; \
    	ADDL	$const, a; \
    	ADDL	R8, a; \
    	ANDL	b, R9; \
    	XORL	d, R9; \
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 4.8K bytes
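
    Note: this is the same MD5 round as in the arm64 file; starting from R9 = d, the XORL/ANDL/XORL sequence builds the round-1 function without a NOT instruction. A quick self-contained check that the rewritten form matches the textbook (b AND c) OR (NOT b AND d); the randomized loop is just for illustration.

        package main

        import (
            "fmt"
            "math/rand"
        )

        // The macro's XORL c,R9 / ANDL b,R9 / XORL d,R9 computes
        // d ^ (b & (c ^ d)); verify it equals (b & c) | (^b & d).
        func main() {
            for i := 0; i < 1_000_000; i++ {
                b, c, d := rand.Uint32(), rand.Uint32(), rand.Uint32()
                if d^(b&(c^d)) != (b&c)|(^b&d) {
                    fmt.Println("mismatch") // never reached
                    return
                }
            }
            fmt.Println("forms agree")
        }
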
  7. src/crypto/sha1/sha1block_amd64.s

    	MOVQ	p_len+16(FP),	DX
    	SHRQ	$6,		DX
    	SHLQ	$6,		DX
    
    	MOVQ	$K_XMM_AR<>(SB), R8
    
    	MOVQ	DI, R9
    	MOVQ	SI, R10
    	LEAQ	64(SI), R13
    
    	ADDQ	SI, DX
    	ADDQ	$64, DX
    	MOVQ	DX, R11
    
    	CMPQ	R13, R11
    	CMOVQCC	R8, R13
    
    	VMOVDQU	BSWAP_SHUFB_CTL<>(SB), Y10
    
    	CALC // RET is inside macros
    
    DATA K_XMM_AR<>+0x00(SB)/4,$0x5a827999
    DATA K_XMM_AR<>+0x04(SB)/4,$0x5a827999
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 31.5K bytes
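
    Note: the SHRQ $6 / SHLQ $6 pair at the top rounds the input length down to a whole number of 64-byte SHA-1 blocks, equivalent to the AND $~63 seen in the arm64 MD5 file; the tail bytes are left for generic Go code. A two-line demonstration that the forms agree:

        package main

        import "fmt"

        func main() {
            // n>>6<<6 and n&^63 both round down to a multiple of 64.
            for _, n := range []uint64{0, 63, 64, 200} {
                fmt.Println(n, n>>6<<6, n&^63)
            }
        }
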
  8. src/crypto/sha1/sha1block_arm.s

    	MOVW	Rt0, p_end	// pointer to end of data
    
    	// Load up initial SHA-1 accumulator
    	MOVW	dig+0(FP), Rt0
    	MOVM.IA (Rt0), [Ra,Rb,Rc,Rd,Re]
    
    loop:
    	// Save registers at SP+4 onwards
    	MOVM.IB [Ra,Rb,Rc,Rd,Re], (R13)
    
    	MOVW	$w_buf, Rw
    	MOVW	$0x5A827999, Rconst
    	MOVW	$3, Rctr
    loop1:	ROUND1(Ra, Rb, Rc, Rd, Re)
    	ROUND1(Re, Ra, Rb, Rc, Rd)
    	ROUND1(Rd, Re, Ra, Rb, Rc)
    	ROUND1(Rc, Rd, Re, Ra, Rb)
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 5.6K bytes
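
    Note: ROUND1 here covers the first twenty SHA-1 steps, all using the constant 0x5A827999 held in Rconst; the rotating argument order (Ra..Re shifting one position per call) takes the place of moving values between registers. A hedged Go equivalent of a single step, with `round1` as my own name for it:

        package main

        import (
            "fmt"
            "math/bits"
        )

        // round1 performs one SHA-1 step for rounds 0-19 with K = 0x5A827999:
        // e += rotl(a,5) + Ch(b,c,d) + w + K, then b is rotated by 30.
        func round1(a, b, c, d, e, w uint32) (uint32, uint32) {
            const k = 0x5A827999
            ch := d ^ (b & (c ^ d)) // "choose" function
            e += bits.RotateLeft32(a, 5) + ch + w + k
            return e, bits.RotateLeft32(b, 30)
        }

        func main() {
            // Standard SHA-1 initial state, first message word zero.
            e, b := round1(0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0, 0)
            fmt.Printf("e=%#x b=%#x\n", e, b)
        }
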
  9. src/internal/runtime/atomic/atomic_arm.s

    // do need to set the SP delta back).
    
    // Check if R1 is 8-byte aligned, panic if not.
    // Clobbers R2.
    #define CHECK_ALIGN \
    	AND.S	$7, R1, R2 \
    	BEQ 	4(PC) \
    	MOVW.W	R14, -4(R13) /* prepare a real frame */ \
    	BL	·panicUnaligned(SB) \
    	ADD	$4, R13 /* compensate SP delta */
    
    TEXT ·Cas64(SB),NOSPLIT,$-4-21
    	NO_LOCAL_POINTERS
    	MOVW	addr+0(FP), R1
    	CHECK_ALIGN
    
    #ifndef GOARM_7
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 5.7K bytes
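
    Note: CHECK_ALIGN enforces the ARM requirement that 64-bit atomics operate on 8-byte-aligned addresses, branching to ·panicUnaligned rather than letting the exclusive load/store fault. Roughly the Go-visible behavior is as sketched below; `checkAlign` and the panic message are illustrative.

        package main

        import (
            "fmt"
            "unsafe"
        )

        // checkAlign mirrors CHECK_ALIGN: panic instead of issuing a
        // 64-bit atomic on a misaligned address.
        func checkAlign(addr *uint64) {
            if uintptr(unsafe.Pointer(addr))&7 != 0 {
                panic("unaligned 64-bit atomic operation")
            }
        }

        func main() {
            var x uint64 // aligned here; on 32-bit ARM only certain placements guarantee it
            checkAlign(&x)
            fmt.Println("aligned")
        }
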
  10. src/runtime/asm_arm64.s

    	MOVD	R9, R0
    
    	// Now on a scheduling stack (a pthread-created stack).
    	// Save room for two of our pointers /*, plus 32 bytes of callee
    	// save area that lives on the caller stack. */
    	MOVD	RSP, R13
    	SUB	$16, R13
    	MOVD	R13, RSP
    	MOVD	R4, 0(RSP)	// save old g on stack
    	MOVD	(g_stack+stack_hi)(R4), R4
    	SUB	R2, R4
    	MOVD	R4, 8(RSP)	// save depth in old g stack (can't just save SP, as stack might be copied during a callback)
    - Last Modified: Sat May 11 20:38:24 UTC 2024
    - 43.4K bytes
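
    Note: the closing comment is worth unpacking: a raw SP saved across a callback could be invalidated if the goroutine stack is copied, so the code saves the depth stack_hi - SP, which stays meaningful after a move. A toy illustration with hypothetical addresses:

        package main

        import "fmt"

        func main() {
            // Before the callback: save depth, not SP (addresses hypothetical).
            oldHi, sp := uintptr(0x8000), uintptr(0x7f40)
            depth := oldHi - sp // what gets stored at 8(RSP)

            // Suppose the stack was copied while the callback ran.
            newHi := uintptr(0xc000)
            fmt.Printf("restored SP: %#x\n", newHi-depth) // still 0xc0 deep
        }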