Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 11 - 20 of 124 for r6 (0.04 sec)

  1. src/runtime/memmove_loong64.s

    	ADDV	$-7, R9, R6 // R6 is end pointer-7
    
    	PCALIGN	$16
    	SGTU	R6, R4, R8
    	BEQ	R8, out
    	MOVV	(R5), R7
    	ADDV	$8, R5
    	MOVV	R7, (R4)
    	ADDV	$8, R4
    	JMP	-6(PC)
    
    out:
    	BEQ	R4, R9, done
    	MOVB	(R5), R7
    	ADDV	$1, R5
    	MOVB	R7, (R4)
    	ADDV	$1, R4
    	JMP	-5(PC)
    done:
    	RET
    
    backward:
    	ADDV	R6, R5 // from-end pointer
    	ADDV	R4, R6, R9 // to-end pointer
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 13 15:04:25 UTC 2024
    - 1.8K bytes
    - Viewed (0)
  2. src/cmd/asm/internal/asm/testdata/arm64.s

    	LDADDLD	R5, (R6), ZR                         // df0065f8
    	LDADDLW	R5, (R6), ZR                         // df0065b8
    	LDADDLH	R5, (R6), ZR                         // df006578
    	LDADDLB	R5, (R6), ZR                         // df006538
    	LDCLRD	R5, (R6), ZR                         // df1025f8
    	LDCLRW	R5, (R6), ZR                         // df1025b8
    	LDCLRH	R5, (R6), ZR                         // df102578
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Dec 08 03:28:17 UTC 2023
    - 94.9K bytes
    - Viewed (0)
  3. src/crypto/subtle/xor_ppc64x.s

    	ADD	$32, R8
    	ADD	$-32, R6
    	CMP	R6, $8
    	BLE	small
    	// Case for 8 <= n < 32 bytes
    	// Process 16 bytes if available
    xor16:
    	CMP	R6, $16
    	BLT	xor8
    	LXVD2X	(R4)(R8), VS32
    	LXVD2X	(R5)(R8), VS33
    	XXLXOR	VS32, VS33, VS32
    	STXVD2X	VS32, (R3)(R8)
    	ADD	$16, R8
    	ADD	$-16, R6
    small:
    	CMP	R6, $0
    	BC	12,2,LR		// BEQLR
    xor8:
    #ifdef GOPPC64_power10
    	SLD	$56,R6,R17
    	ADD	R4,R8,R18
    	ADD	R5,R8,R19
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 18:17:17 UTC 2024
    - 2.9K bytes
    - Viewed (0)
  4. src/internal/runtime/atomic/atomic_s390x.s

    	MOVD	ptr+0(FP), R4
    	MOVW	new+8(FP), R3
    	MOVW	(R4), R6
    repeat:
    	CS	R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4)
    	BNE	repeat
    	MOVW	R6, ret+16(FP)
    	RET
    
    // func Xchg64(ptr *uint64, new uint64) uint64
    TEXT ·Xchg64(SB), NOSPLIT, $0-24
    	MOVD	ptr+0(FP), R4
    	MOVD	new+8(FP), R3
    	MOVD	(R4), R6
    repeat:
    	CSG	R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4)
    	BNE	repeat
    	MOVD	R6, ret+16(FP)
    	RET
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 7.1K bytes
    - Viewed (0)
  5. src/math/big/arith_arm64.s

    	MOVD.W	-8(R2), R6
    	LSR	R4, R6, R5	// return value
    	LSL	R3, R6, R8	// x[i] << s
    	SUB	$1, R1
    one:	TBZ	$0, R1, two
    	MOVD.W	-8(R2), R6
    	LSR	R4, R6, R7
    	ORR	R8, R7
    	LSL	R3, R6, R8
    	SUB	$1, R1
    	MOVD.W	R7, -8(R0)
    two:
    	TBZ	$1, R1, loop
    	LDP.W	-16(R2), (R6, R7)
    	LSR	R4, R7, R10
    	ORR	R8, R10
    	LSL	R3, R7
    	LSR	R4, R6, R9
    	ORR	R7, R9
    	LSL	R3, R6, R8
    	SUB	$2, R1
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Oct 19 23:33:27 UTC 2023
    - 11.8K bytes
    - Viewed (0)
  6. src/internal/bytealg/equal_arm64.s

    	MOVD	(R1), R5
    	EOR	R4, R5
    	CBNZ	R5, not_equal
    	SUB	$8, R2, R6	// offset of the last 8 bytes
    	MOVD	(R0)(R6), R4
    	MOVD	(R1)(R6), R5
    	EOR	R4, R5
    	CBNZ	R5, not_equal
    	B	equal
    lt_8:
    	TBZ	$2, R2, lt_4
    	MOVWU	(R0), R4
    	MOVWU	(R1), R5
    	EOR	R4, R5
    	CBNZ	R5, not_equal
    	SUB	$4, R2, R6	// offset of the last 4 bytes
    	MOVWU	(R0)(R6), R4
    	MOVWU	(R1)(R6), R5
    	EOR	R4, R5
    	CBNZ	R5, not_equal
    	B	equal
    lt_4:
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Jan 24 16:07:25 UTC 2024
    - 2.5K bytes
    - Viewed (0)
  7. src/cmd/asm/internal/asm/testdata/ppc64.s

    	RLWNM $3, R4, $29, $31, R6      // 54861f7e
    	RLWNM $0, R4, $29, $31, R6      // 5486077e
    	RLWNM R0, R4, $29, $31, R6      // 5c86077e
    	RLWNM R3, R4, $7, R6            // 5c861f7e
    	RLWNM R3, R4, $29, $31, R6      // 5c861f7e
    	RLWNMCC $3, R4, $7, R6          // 54861f7f
    	RLWNMCC $3, R4, $29, $31, R6    // 54861f7f
    	RLWNMCC R3, R4, $7, R6          // 5c861f7f
    	RLWNMCC R3, R4, $29, $31, R6    // 5c861f7f
    	RLDMI $0, R4, $7, R6            // 7886076c
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 21:53:50 UTC 2024
    - 50.2K bytes
    - Viewed (0)
  8. src/crypto/internal/bigmod/nat_ppc64x.s

    TEXT ·addMulVVW1024(SB), $0-32
    	MOVD	$4, R6 // R6 = z_len/4
    	JMP		addMulVVWx<>(SB)
    
    // func addMulVVW1536(z, x *uint, y uint) (c uint)
    TEXT ·addMulVVW1536(SB), $0-32
    	MOVD	$6, R6 // R6 = z_len/4
    	JMP		addMulVVWx<>(SB)
    
    // func addMulVVW2048(z, x *uint, y uint) (c uint)
    TEXT ·addMulVVW2048(SB), $0-32
    	MOVD	$8, R6 // R6 = z_len/4
    	JMP		addMulVVWx<>(SB)
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Jan 25 19:32:43 UTC 2024
    - 1.9K bytes
    - Viewed (0)
  9. src/internal/runtime/syscall/asm_linux_loong64.s

    // a1  | R5          | R4
    // a2  | R6          | R5
    // a3  | R7          | R6
    // a4  | R8          | R7
    // a5  | R9          | R8
    // a6  | R10         | R9
    //
    // r1  | R4          | R4
    // r2  | R5          | R5
    // err | R6          | part of R4
    TEXT ·Syscall6<ABIInternal>(SB),NOSPLIT,$0-80
    	MOVV	R4, R11  // syscall entry
    	MOVV	R5, R4
    	MOVV	R6, R5
    	MOVV	R7, R6
    	MOVV	R8, R7
    	MOVV	R9, R8
    	MOVV	R10, R9
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 13 15:04:25 UTC 2024
    - 1013 bytes
    - Viewed (0)
  10. src/cmd/vendor/golang.org/x/sys/unix/asm_zos_s390x.s

    	LMG  0(R8), R5, R6
    	MOVD $0, 0(R9)          // R9 address of SAVSTACK_ASYNC
    	LE_CALL                 // balr R7, R6 (return #1)
    	NOPH
    	MOVD R3, ret+32(FP)
    	CMP  R3, $-1            // compare result to -1
    	BNE  done
    
    	// retrieve errno and errno2
    	MOVD  zosLibVec<>(SB), R8
    	ADD   $(__errno), R8
    	LMG   0(R8), R5, R6
    	LE_CALL                   // balr R7, R6 __errno (return #3)
    	NOPH
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 16:12:58 UTC 2024
    - 11.2K bytes
    - Viewed (0)
Back to top