Results 51 - 60 of 321 for R4 (0.32 sec)

  1. src/crypto/aes/asm_s390x.s

    	MOVD	a_base+24(FP), R2
    	MOVD	b_base+48(FP), R3
    	MOVD	a_len+32(FP), R4
    	MOVD	b_len+56(FP), R5
    	CMPBLE	R4, R5, skip
    	MOVD	R5, R4
    skip:
    	MOVD	R4, ret+72(FP)
    	MOVD	$0, R5
    	CMPBLT	R4, $8, tail
    loop:
    	MOVD	0(R2)(R5*1), R7
    	MOVD	0(R3)(R5*1), R8
    	XOR	R7, R8
    	MOVD	R8, 0(R1)(R5*1)
    	LAY	8(R5), R5
    	SUB	$8, R4
    	CMPBGE	R4, $8, loop
    tail:
    	CMPBEQ	R4, $0, done
    	MOVB	0(R2)(R5*1), R7
    	MOVB	0(R3)(R5*1), R8
    	XOR	R7, R8
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 4.4K bytes
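
    Note: the excerpt above is the core of an xorBytes-style helper. It loads the two source pointers and lengths from the frame, clamps the count to the shorter operand, XORs eight bytes per iteration, and finishes with a byte-at-a-time tail; the destination pointer (R1) is loaded before the excerpt starts. A rough Go rendering of the same shape (the function name, the dst slice, and the little-endian word access are my assumptions, not taken from the file):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // xorBytes XORs a and b into dst and returns min(len(a), len(b)), using an
    // 8-bytes-per-iteration main loop and a byte tail, mirroring the excerpt.
    // Endianness does not matter here because load and store use the same order.
    func xorBytes(dst, a, b []byte) int {
        n := len(a)
        if len(b) < n { // CMPBLE R4, R5, skip / MOVD R5, R4
            n = len(b)
        }
        i := 0
        for ; n-i >= 8; i += 8 { // MOVD / XOR / MOVD word loop
            x := binary.LittleEndian.Uint64(a[i:])
            y := binary.LittleEndian.Uint64(b[i:])
            binary.LittleEndian.PutUint64(dst[i:], x^y)
        }
        for ; i < n; i++ { // MOVB tail
            dst[i] = a[i] ^ b[i]
        }
        return n
    }

    func main() {
        a := []byte("attack at dawn!!")
        b := []byte{0x13, 0x37, 0x13, 0x37, 0x13, 0x37, 0x13, 0x37, 0x13, 0x37}
        dst := make([]byte, len(a))
        fmt.Printf("xored %d bytes: %x\n", xorBytes(dst, a, b), dst)
    }
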
  2. src/runtime/rt0_freebsd_arm64.s

    	// Synchronous initialization.
    	MOVD	$runtime·libpreinit(SB), R4
    	BL	(R4)
    
    	// Create a new thread to do the runtime initialization and return.
    	MOVD	_cgo_sys_thread_create(SB), R4
    	CBZ	R4, nocgo
    	MOVD	$_rt0_arm64_freebsd_lib_go(SB), R0
    	MOVD	$0, R1
    	SUB	$16, RSP	// reserve 16 bytes for sp-8 where fp may be saved.
    	BL	(R4)
    	ADD	$16, RSP
    	B	restore
    
    nocgo:
    - Last Modified: Wed Mar 30 01:28:43 UTC 2022
    - 1.9K bytes
  3. src/runtime/sys_openbsd_mips64.s

    	MULVU	R3, R4
    	MOVV	LO, R4
    	SUBVU	R4, R5
    	MOVV	R5, 16(R29)		// tv_nsec
    
    	ADDV	$8, R29, R4		// arg 1 - rqtp
    	MOVV	$0, R5			// arg 2 - rmtp
    	MOVV	$91, R2			// sys_nanosleep
    	SYSCALL
    	RET
    
    TEXT runtime·getthrid(SB),NOSPLIT,$0-4
    	MOVV	$299, R2		// sys_getthrid
    	SYSCALL
    	MOVW	R2, ret+0(FP)
    	RET
    
    TEXT runtime·thrkill(SB),NOSPLIT,$0-16
    	MOVW	tid+0(FP), R4		// arg 1 - tid
    - Last Modified: Tue Jun 06 18:49:01 UTC 2023
    - 8.8K bytes
  4. src/crypto/internal/edwards25519/field/_asm/fe_amd64_asm.go

    	addMul64(r3, 1, a1, b2)
    	addMul64(r3, 1, a2, b1)
    	addMul64(r3, 1, a3, b0)
    	addMul64(r3, 19, a4, b4)
    
    	// r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
    	r4 := uint128{"r4", GP64(), GP64()}
    	mul64(r4, 1, a0, b4)
    	addMul64(r4, 1, a1, b3)
    	addMul64(r4, 1, a2, b2)
    	addMul64(r4, 1, a3, b1)
    	addMul64(r4, 1, a4, b0)
    
    	Comment("First reduction chain")
    	maskLow51Bits := GP64()
    	MOVQ(Imm((1<<51)-1), maskLow51Bits)
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 7.2K bytes
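
    Note: this file is an avo-based generator that emits the amd64 multiplication routine for GF(2^255-19) elements held as five 51-bit limbs; r4 accumulates the fifth limb of the schoolbook product, and maskLow51Bits drives the carry chain that follows. A plain-Go sketch of one such accumulation and carry step, using math/bits (the simplified addMul64 signature and the example limb values are mine):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // uint128 is a two-word accumulator, standing in for the generator's uint128.
    type uint128 struct{ lo, hi uint64 }

    // addMul64 adds scale*x*y into acc, like the generator's mul64/addMul64;
    // scale is 19 for the terms that wrap past the fifth limb, 1 otherwise.
    func addMul64(acc uint128, scale, x, y uint64) uint128 {
        hi, lo := bits.Mul64(scale*x, y)
        lo, carry := bits.Add64(acc.lo, lo, 0)
        hi, _ = bits.Add64(acc.hi, hi, carry)
        return uint128{lo, hi}
    }

    func main() {
        // Arbitrary small limbs; the real code keeps each limb below ~2^52.
        a := [5]uint64{1, 2, 3, 4, 5}
        b := [5]uint64{6, 7, 8, 9, 10}

        // r4 = a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0, as in the excerpt's comment.
        var r4 uint128
        r4 = addMul64(r4, 1, a[0], b[4])
        r4 = addMul64(r4, 1, a[1], b[3])
        r4 = addMul64(r4, 1, a[2], b[2])
        r4 = addMul64(r4, 1, a[3], b[1])
        r4 = addMul64(r4, 1, a[4], b[0])

        // Reduction step: keep the low 51 bits, extract the carry. For this top
        // limb the real chain multiplies the carry by 19 and folds it into r0.
        const maskLow51Bits = (1 << 51) - 1
        carry := r4.hi<<13 | r4.lo>>51
        fmt.Printf("r4 limb = %d, carry out = %d\n", r4.lo&maskLow51Bits, carry)
    }
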
  5. src/cmd/asm/internal/asm/testdata/s390x.s

    	ADDE	R1, R2                // b9880021
    	SUB	R3, R4                // b9090043
    	SUB	R3, R4, R5            // b9e93054
    	SUB	$8192, R3             // a73be000
    	SUB	$8192, R3, R4         // ec43e00000d9
    	SUBC	R1, R2                // b90b0021
    	SUBC	$1, R1, R2            // ec21ffff00db
    	SUBC	R2, R3, R4            // b9eb2043
    	SUBW	R3, R4                // 1b43
    	SUBW	R3, R4, R5            // b9f93054
    - Last Modified: Wed Nov 22 03:55:32 UTC 2023
    - 21.6K bytes
  6. src/runtime/sys_linux_ppc64x.s

    	MOVD	$0x8637bd06, R4
    	MULLD	R3, R4, R4	// Convert usec to S.
    	SRD	$51, R4, R4
    	MOVD	R4, 8(R1)	// Store to tv_sec
    
    	MOVD	$1000000, R5
    	MULLW	R4, R5, R5	// Convert tv_sec back into uS
    	SUB	R5, R3, R5	// Compute remainder uS.
    	MULLD	$1000, R5, R5	// Convert to nsec
    	MOVD	R5, 16(R1)	// Store to tv_nsec
    
    	// nanosleep(&ts, 0)
    	ADD	$8, R1, R3
    	MOVW	$0, R4
    	SYSCALL	$SYS_nanosleep
    	RET
    
    - Last Modified: Wed May 22 18:17:17 UTC 2024
    - 18.1K bytes
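
    Note: the MOVD $0x8637bd06 / MULLD / SRD $51 sequence is a multiply-by-reciprocal division: 0x8637bd06 is roughly 2^51/10^6 rounded up, so (usec*0x8637bd06)>>51 equals usec/1000000 for the 32-bit microsecond counts usleep handles, and the instructions that follow rebuild the remainder and scale it to nanoseconds. A quick Go spot-check of that identity (the test values are my choice):

    package main

    import "fmt"

    func main() {
        const magic = 0x8637bd06 // ~ceil(2^51 / 1e6), the constant from the excerpt

        // Check that the shift-by-51 reciprocal multiply matches real division
        // for a few 32-bit microsecond counts, including the extremes.
        for _, usec := range []uint64{0, 1, 999999, 1000000, 1000001, 1<<31 - 1, 1<<32 - 1} {
            fast := (usec * magic) >> 51 // MULLD + SRD $51
            exact := usec / 1000000
            fmt.Printf("usec=%-10d fast=%-5d exact=%-5d ok=%v\n", usec, fast, exact, fast == exact)
        }
    }
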
  7. src/runtime/memclr_mips64x.s

    	VMOVB	W0, 112(R1)
    
    	ADDVU	$128, R1
    	SGTU	R6, R1, R3
    	BNE	R3, R0, msa_large_loop
    	BEQ	R5, R0, done
    	VMOVB	W0, -128(R4)
    	VMOVB	W0, -112(R4)
    	VMOVB	W0, -96(R4)
    	VMOVB	W0, -80(R4)
    	VMOVB	W0, -64(R4)
    	VMOVB	W0, -48(R4)
    	VMOVB	W0, -32(R4)
    	VMOVB	W0, -16(R4)
    	JMP	done
    
    no_msa:
    	// if less than 8 bytes, do one byte at a time
    	SGTU	$8, R2, R3
    	BNE	R3, out
    
    	// do one byte at a time until 8-aligned
    - Last Modified: Sat Nov 06 10:24:44 UTC 2021
    - 1.7K bytes
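
    Note: the block of VMOVB stores at negative offsets from R4 (which appears to hold the end of the buffer) is the overlapping-tail idiom: once the 128-byte main loop can no longer run, the last 128 bytes are zeroed in one go, re-clearing a few already-zeroed bytes rather than branching on the exact remainder. A rough Go rendering of that control flow, without MSA vectors (the function names and the small-size fallback are mine):

    package main

    import "fmt"

    // zero stands in for one group of vector stores.
    func zero(b []byte) {
        for i := range b {
            b[i] = 0
        }
    }

    // clear128 zeroes buf the way the MSA path does: full 128-byte blocks in a
    // main loop, then one overlapping write over the last 128 bytes for the tail.
    func clear128(buf []byte) {
        n := len(buf)
        if n < 128 {
            zero(buf) // the real code has separate small-size paths (no_msa, byte loop)
            return
        }
        i := 0
        for ; i+128 <= n; i += 128 {
            zero(buf[i : i+128]) // eight 16-byte VMOVB stores per iteration
        }
        if i < n {
            zero(buf[n-128:]) // overlapping tail, like the -128(R4)..-16(R4) stores
        }
    }

    func main() {
        buf := make([]byte, 300)
        for i := range buf {
            buf[i] = 0xff
        }
        clear128(buf)
        ok := true
        for _, c := range buf {
            ok = ok && c == 0
        }
        fmt.Println("all zero:", ok)
    }
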
  8. src/runtime/sys_linux_mipsx.s

    	RET
    
    TEXT runtime·usleep(SB),NOSPLIT,$28-4
    	MOVW	usec+0(FP), R3
    	MOVW	R3, R5
    	MOVW	$1000000, R4
    	DIVU	R4, R3
    	MOVW	LO, R3
    	MOVW	R3, 24(R29)
    	MOVW	$1000, R4
    	MULU	R3, R4
    	MOVW	LO, R4
    	SUBU	R4, R5
    	MOVW	R5, 28(R29)
    
    	// nanosleep(&ts, 0)
    	ADDU	$24, R29, R4
    	MOVW	$0, R5
    	MOVW	$SYS_nanosleep, R2
    	SYSCALL
    	RET
    
    TEXT runtime·gettid(SB),NOSPLIT,$0-4
    - Last Modified: Tue Oct 18 20:57:24 UTC 2022
    - 9.7K bytes
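
    Note: this usleep follows the same pattern as the other ports above: derive tv_sec by dividing the microsecond count by 10^6 (the DIVU), fill tv_nsec from what is left over, and invoke SYS_nanosleep with a nil rmtp. A hedged Go equivalent for Linux (the exported usleep helper here is my own; the runtime's version is not callable from normal code):

    package main

    import (
        "fmt"
        "syscall"
    )

    // usleep splits a microsecond count into a timespec and calls
    // nanosleep(&ts, nil), mirroring the assembly's structure. Linux-only.
    func usleep(usec uint32) error {
        sec := usec / 1_000_000           // tv_sec, the DIVU by $1000000
        nsec := (usec % 1_000_000) * 1000 // leftover microseconds as nanoseconds
        ts := syscall.NsecToTimespec(int64(sec)*1_000_000_000 + int64(nsec))
        return syscall.Nanosleep(&ts, nil) // SYS_nanosleep, rmtp = nil
    }

    func main() {
        fmt.Println("sleeping 50ms...")
        if err := usleep(50_000); err != nil {
            fmt.Println("nanosleep:", err)
        }
        fmt.Println("done")
    }
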
  9. src/internal/bytealg/index_s390x.s

    // sep: string to search for
    // R1=&s[0], R2=len(s)
    // R3=&sep[0], R4=len(sep)
    // R5=&ret (int)
    // Caller must confirm availability of vx facility before calling.
    TEXT indexbody<>(SB),NOSPLIT|NOFRAME,$0
    	CMPBGT	R4, R2, notfound
    	ADD	R1, R2
    	SUB	R4, R2 // R2=&s[len(s)-len(sep)] (last valid index)
    	CMPBEQ	R4, $0, notfound
    	SUB	$1, R4 // R4=len(sep)-1 for use as VLL index
    	VLL	R4, (R3), V0 // contains first 16 bytes of sep
    	MOVD	R1, R7
    - Last Modified: Sun Mar 04 19:49:44 UTC 2018
    - 5.5K bytes
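
    Note: the excerpt shows the scalar preamble of the vector substring search: give up if sep is longer than s or empty, compute the last index at which sep could still start, and preload (up to) the first 16 bytes of sep with VLL. A plain-Go sketch of just that scaffolding, with the vector comparison replaced by bytes.Equal (so it shows the bounds logic, not the s390x vector search itself):

    package main

    import (
        "bytes"
        "fmt"
    )

    // index mirrors the bounds checks around the excerpt; the body of the real
    // routine compares 16-byte vector chunks instead of calling bytes.Equal.
    func index(s, sep []byte) int {
        if len(sep) > len(s) || len(sep) == 0 {
            return -1 // CMPBGT R4, R2, notfound / CMPBEQ R4, $0, notfound
        }
        last := len(s) - len(sep) // R2 = &s[len(s)-len(sep)], last valid start
        for i := 0; i <= last; i++ {
            if bytes.Equal(s[i:i+len(sep)], sep) {
                return i
            }
        }
        return -1
    }

    func main() {
        fmt.Println(index([]byte("hello gopher"), []byte("gopher")))  // 6
        fmt.Println(index([]byte("hello gopher"), []byte("panther"))) // -1
    }
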
  10. src/hash/crc32/crc32_s390x.s

    	ADD     $(-64), R4               // LEN = LEN - 64
    
    	CMP     R4, $64
    	BGE     fold_64bytes_loop
    
    less_than_64bytes:
    	// Fold V1 to V4 into a single 128-bit value in V1
    	VGFMAG  CONST_R4R3, V1, V2, V1
    	VGFMAG  CONST_R4R3, V1, V3, V1
    	VGFMAG  CONST_R4R3, V1, V4, V1
    
    	// Check whether to continue with 64-bit folding
    	CMP R4, $16
    	BLT final_fold
    
    fold_16bytes_loop:
    - Last Modified: Tue Apr 20 00:49:17 UTC 2021
    - 7.6K bytes