Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 31 - 40 of 321 for R4 (0.14 sec)

  1. src/internal/bytealg/compare_mipsx.s

    	MOVW	a_base+0(FP), R3
    	MOVW	b_base+12(FP), R4
    	MOVW	a_len+4(FP), R1
    	MOVW	b_len+16(FP), R2
    	BEQ	R3, R4, samebytes
    	SGTU	R1, R2, R7
    	MOVW	R1, R8
    	CMOVN	R7, R2, R8	// R8 is min(R1, R2)
    
    	ADDU	R3, R8	// R3 is current byte in a, R8 is last byte in a to compare
    loop:
    	BEQ	R3, R8, samebytes
    
    	MOVBU	(R3), R6
    	ADDU	$1, R3
    	MOVBU	(R4), R7
    	ADDU	$1, R4
    	BEQ	R6, R7 , loop
    
    	SGTU	R6, R7, R8
    	MOVW	$-1, R6
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sat Nov 06 10:24:44 UTC 2021
    - 1.4K bytes
    - Viewed (0)
  2. src/runtime/memclr_ppc64x.s

    	STXVD2X VS32, (R3+R17)
    	ADD     $64, R3
    	ADD     $-64, R4
    	BDNZ    zero64          // dec ctr, br zero64 if ctr not 0
    	SRDCC   $3, R4, R6	// remaining doublewords
    	BEQ     nozerolarge
    
    lt64gt8:
    	CMP	R4, $32
    	BLT	lt32gt8
    	MOVD	$16, R8
    	STXVD2X	VS32, (R3+R0)
    	STXVD2X	VS32, (R3+R8)
    	ADD	$-32, R4
    	ADD	$32, R3
    lt32gt8:
    	CMP	R4, $16
    	BLT	lt16gt8
    	STXVD2X	VS32, (R3+R0)
    	ADD	$16, R3
    	ADD	$-16, R4
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue May 16 17:08:59 UTC 2023
    - 4.4K bytes
    - Viewed (0)
  3. src/runtime/memmove_loong64.s

    	ADDV	$1, R4
    	JMP	-6(PC)
    
    words:
    	// do 8 bytes at a time if there is room
    	ADDV	$-7, R9, R6 // R6 is end pointer-7
    
    	PCALIGN	$16
    	SGTU	R6, R4, R8
    	BEQ	R8, out
    	MOVV	(R5), R7
    	ADDV	$8, R5
    	MOVV	R7, (R4)
    	ADDV	$8, R4
    	JMP	-6(PC)
    
    out:
    	BEQ	R4, R9, done
    	MOVB	(R5), R7
    	ADDV	$1, R5
    	MOVB	R7, (R4)
    	ADDV	$1, R4
    	JMP	-5(PC)
    done:
    	RET
    
    backward:
    	ADDV	R6, R5 // from-end pointer
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 13 15:04:25 UTC 2024
    - 1.8K bytes
    - Viewed (0)
  4. src/hash/crc32/crc32_ppc64le.s

    	CMP	R15,$1		// Identify warm up pass
    	BEQ	next
    
    	// First warm up pass: load the bytes to process
    	LVX	(R4),V16
    	LVX	(R4+off16),V17
    	LVX	(R4+off32),V18
    	LVX	(R4+off48),V19
    	LVX	(R4+off64),V20
    	LVX	(R4+off80),V21
    	LVX	(R4+off96),V22
    	LVX	(R4+off112),V23
    	ADD	$128,R4		// bump up to next 128 bytes in buffer
    
    	VXOR	V16,V8,V16	// xor in initial CRC in V8
    
    next:
    	BC	18,0,first_warm_up_done
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 06 12:09:50 UTC 2024
    - 13.1K bytes
    - Viewed (0)
  5. src/internal/bytealg/indexbyte_loong64.s

    TEXT ·IndexByte<ABIInternal>(SB),NOSPLIT,$0-40
    	// func IndexByte(b []byte, c byte) int
    	// Returns the index of the first occurrence of c in b, or -1.
    	// Register ABI (loong64):
    	// R4 = b_base
    	// R5 = b_len
    	// R6 = b_cap (unused)
    	// R7 = byte to find
    	AND	$0xff, R7	// keep only the low byte of c
    	MOVV	R4, R6		// store base for later
    	ADDV	R4, R5		// end
    	ADDV	$-1, R4		// pre-decrement so the loop can increment first

    	PCALIGN	$16
    loop:
    	ADDV	$1, R4		// advance to next byte
    	BEQ	R4, R5, notfound	// reached end without a match
    	MOVBU	(R4), R8	// load current byte, zero-extended
    	BNE	R7, R8, loop	// keep scanning while bytes differ

    	SUBV	R6, R4		// remove base
    	RET			// result index returned in R4

    notfound:
    	MOVV	$-1, R4		// c not present: return -1
    	RET
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 13 15:04:25 UTC 2024
    - 905 bytes
    - Viewed (0)
  6. src/runtime/sys_linux_s390x.s

    	MOVD	m_vdsoPC(R6), R4
    	MOVD	R4, 16(R15)
    	MOVD	m_vdsoSP(R6), R4
    	MOVD	R4, 24(R15)
    
    	MOVD	R14, R8 		// Backup return address
    	MOVD	$sec+0(FP), R4 	// return parameter caller
    
    	MOVD	R8, m_vdsoPC(R6)
    	MOVD	R4, m_vdsoSP(R6)
    
    	MOVD	m_curg(R6), R5
    	CMP		g, R5
    	BNE		noswitch
    
    	MOVD	m_g0(R6), R4
    	MOVD	(g_sched+gobuf_sp)(R4), R15	// Set SP to g0 stack
    
    noswitch:
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Mar 24 18:53:44 UTC 2023
    - 12.5K bytes
    - Viewed (0)
  7. src/internal/runtime/atomic/atomic_loong64.s

    //		return 1;
    //	} else
    //		return 0;
    TEXT ·Cas(SB), NOSPLIT, $0-17
    	// bool Cas(uint32 *ptr, uint32 old, uint32 new)
    	// Atomic compare-and-swap using an LL/SC retry loop; the header
    	// comment above (partially visible) gives the same contract:
    	// if *ptr == old { *ptr = new; return 1 } else { return 0 }
    	MOVV	ptr+0(FP), R4
    	MOVW	old+8(FP), R5
    	MOVW	new+12(FP), R6
    	DBAR			// full barrier before entering the CAS loop
    cas_again:
    	MOVV	R6, R7		// SC consumes its source register, so copy new each attempt
    	LL	(R4), R8	// load-linked: R8 = *ptr, reserve the address
    	BNE	R5, R8, cas_fail	// *ptr != old -> CAS fails
    	SC	R7, (R4)	// store-conditional new; R7 = 1 on success, 0 if reservation lost
    	BEQ	R7, cas_again	// reservation lost: retry the whole LL/SC sequence
    	MOVV	$1, R4
    	MOVB	R4, ret+16(FP)	// success: return true
    	DBAR			// barrier after the successful swap
    	RET
    cas_fail:
    	MOVV	$0, R4
    	JMP	-4(PC)		// reuse the success tail: stores 0 to ret, DBAR, RET
    
    // bool	cas64(uint64 *ptr, uint64 old, uint64 new)
    // Atomically:
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 6.3K bytes
    - Viewed (0)
  8. src/internal/bytealg/indexbyte_arm64.s

    	// Condition flags come from SUBS in the loop
    	BHS	tail
    
    masklast:
    	// Clear the irrelevant upper bits
    	ADD	R9, R10, R4
    	AND	$0x1f, R4, R4
    	SUB	$0x20, R4, R4
    	NEG	R4<<1, R4
    	LSL	R4, R6, R6
    	LSR	R4, R6, R6
    
    tail:
    	// Check that we have found a character
    	CBZ	R6, fail
    	// Count the trailing zeros using bit reversing
    	RBIT	R6, R6
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Nov 08 20:52:47 UTC 2018
    - 3.3K bytes
    - Viewed (0)
  9. src/internal/bytealg/equal_arm64.s

    	TBZ	$3, R2, lt_8
    	MOVD	(R0), R4
    	MOVD	(R1), R5
    	EOR	R4, R5
    	CBNZ	R5, not_equal
    	SUB	$8, R2, R6	// offset of the last 8 bytes
    	MOVD	(R0)(R6), R4
    	MOVD	(R1)(R6), R5
    	EOR	R4, R5
    	CBNZ	R5, not_equal
    	B	equal
    lt_8:
    	TBZ	$2, R2, lt_4
    	MOVWU	(R0), R4
    	MOVWU	(R1), R5
    	EOR	R4, R5
    	CBNZ	R5, not_equal
    	SUB	$4, R2, R6	// offset of the last 4 bytes
    	MOVWU	(R0)(R6), R4
    	MOVWU	(R1)(R6), R5
    	EOR	R4, R5
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Jan 24 16:07:25 UTC 2024
    - 2.5K bytes
    - Viewed (0)
  10. src/math/dim_s390x.s

    TEXT ·archMax(SB),NOSPLIT,$0
    	// +Inf special cases
    	MOVD    $PosInf, R4
    	MOVD    x+0(FP), R8
    	CMPUBEQ R4, R8, isPosInf
    	MOVD    y+8(FP), R9
    	CMPUBEQ R4, R9, isPosInf
    	// NaN special cases
    	MOVD    $~(1<<63), R5 // bit mask
    	MOVD    $PosInf, R4
    	MOVD    R8, R2
    	AND     R5, R2 // x = |x|
    	CMPUBLT R4, R2, isMaxNaN
    	MOVD    R9, R3
    	AND     R5, R3 // y = |y|
    	CMPUBLT R4, R3, isMaxNaN
    	// ±0 special cases
    	OR      R3, R2
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Apr 15 15:48:19 UTC 2021
    - 2K bytes
    - Viewed (0)
Back to top