Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 10 of 73 for X16 (0.04 sec)

  1. src/crypto/sha512/sha512block_riscv64.s

    	SHA512ROUND0(7, X11, X12, X13, X14, X15, X16, X17, X10)
    	SHA512ROUND0(8, X10, X11, X12, X13, X14, X15, X16, X17)
    	SHA512ROUND0(9, X17, X10, X11, X12, X13, X14, X15, X16)
    	SHA512ROUND0(10, X16, X17, X10, X11, X12, X13, X14, X15)
    	SHA512ROUND0(11, X15, X16, X17, X10, X11, X12, X13, X14)
    	SHA512ROUND0(12, X14, X15, X16, X17, X10, X11, X12, X13)
    	SHA512ROUND0(13, X13, X14, X15, X16, X17, X10, X11, X12)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Mar 07 14:57:07 UTC 2024
    - 9.1K bytes
    - Viewed (0)
  2. src/internal/bytealg/compare_riscv64.s

    	BNEZ	X7, align
    
    check32:
    	// X6 contains $32
    	BLT	X5, X6, compare16
    compare32:
    	MOV	0(X10), X15
    	MOV	0(X12), X16
    	MOV	8(X10), X17
    	MOV	8(X12), X18
    	BNE	X15, X16, cmp8a
    	BNE	X17, X18, cmp8b
    	MOV	16(X10), X15
    	MOV	16(X12), X16
    	MOV	24(X10), X17
    	MOV	24(X12), X18
    	BNE	X15, X16, cmp8a
    	BNE	X17, X18, cmp8b
    	ADD	$32, X10
    	ADD	$32, X12
    	SUB	$32, X5
    	BGE	X5, X6, compare32
    	BEQZ	X5, cmp_len
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Nov 09 13:57:06 UTC 2023
    - 3.9K bytes
    - Viewed (0)
  3. src/runtime/memmove_riscv64.s

    	BLT	X12, X9, f_loop16_check
    	MOV	$64, X9
    	BLT	X12, X9, f_loop32_check
    f_loop64:
    	MOV	0(X11), X14
    	MOV	8(X11), X15
    	MOV	16(X11), X16
    	MOV	24(X11), X17
    	MOV	32(X11), X18
    	MOV	40(X11), X19
    	MOV	48(X11), X20
    	MOV	56(X11), X21
    	MOV	X14, 0(X10)
    	MOV	X15, 8(X10)
    	MOV	X16, 16(X10)
    	MOV	X17, 24(X10)
    	MOV	X18, 32(X10)
    	MOV	X19, 40(X10)
    	MOV	X20, 48(X10)
    	MOV	X21, 56(X10)
    	ADD	$64, X10
    	ADD	$64, X11
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Nov 09 13:57:06 UTC 2023
    - 5.5K bytes
    - Viewed (0)
  4. test/inline_math_bits_rotate.go

    // Test that inlining of math/bits.RotateLeft* treats those calls as intrinsics.
    
    package p
    
    import "math/bits"
    
    var (
    	x8  uint8
    	x16 uint16
    	x32 uint32
    	x64 uint64
    	x   uint
    )
    
    func f() { // ERROR "can inline f"
    	x8 = bits.RotateLeft8(x8, 1)
    	x16 = bits.RotateLeft16(x16, 1)
    	x32 = bits.RotateLeft32(x32, 1)
    	x64 = bits.RotateLeft64(x64, 1)
    	x = bits.RotateLeft(x, 1)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Oct 19 23:33:25 UTC 2023
    - 571 bytes
    - Viewed (0)
  5. src/internal/bytealg/equal_riscv64.s

    loop32:
    	MOV	0(X10), X19
    	MOV	0(X11), X20
    	MOV	8(X10), X21
    	MOV	8(X11), X22
    	BNE	X19, X20, not_eq
    	BNE	X21, X22, not_eq
    	MOV	16(X10), X14
    	MOV	16(X11), X15
    	MOV	24(X10), X16
    	MOV	24(X11), X17
    	BNE	X14, X15, not_eq
    	BNE	X16, X17, not_eq
    	ADD	$32, X10
    	ADD	$32, X11
    	SUB	$32, X12
    	BGE	X12, X9, loop32
    	BEQZ	X12, eq
    
    loop16_check:
    	MOV	$16, X23
    	BLT	X12, X23, loop4_check
    loop16:
    	MOV	0(X10), X19
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Nov 09 13:57:06 UTC 2023
    - 2.4K bytes
    - Viewed (0)
  6. src/crypto/internal/nistec/fiat/p521_invert.go

    	//
    	//	_10       = 2*1
    	//	_11       = 1 + _10
    	//	_1100     = _11 << 2
    	//	_1111     = _11 + _1100
    	//	_11110000 = _1111 << 4
    	//	_11111111 = _1111 + _11110000
    	//	x16       = _11111111 << 8 + _11111111
    	//	x32       = x16 << 16 + x16
    	//	x64       = x32 << 32 + x32
    	//	x65       = 2*x64 + 1
    	//	x129      = x65 << 64 + x64
    	//	x130      = 2*x129 + 1
    	//	x259      = x130 << 129 + x129
    	//	x260      = 2*x259 + 1
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Aug 12 00:04:29 UTC 2022
    - 1.8K bytes
    - Viewed (0)
  7. test/codegen/noextend.go

    }
    func shiftidx(u8 *uint8, x16 *int16, u16 *uint16) {
    
    	// ppc64x:-"MOVBZ\tR\\d+,\\sR\\d+"
    	val16[0] = uint16(sval16[*u8>>2])
    
    	// ppc64x:-"MOVH\tR\\d+,\\sR\\d+"
    	sval16[1] = int16(val16[*x16>>1])
    
    	// ppc64x:-"MOVHZ\tR\\d+,\\sR\\d+"
    	val16[1] = uint16(sval16[*u16>>2])
    
    }
    
    func setnox(x8 int8, u8 *uint8, y8 *int8, z8 *uint8, x16 *int16, u16 *uint16, x32 *int32, u32 *uint32) {
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Dec 14 17:22:18 UTC 2023
    - 5.4K bytes
    - Viewed (0)
  8. src/crypto/internal/bigmod/nat_riscv64.s

    	ADD	X12, X22, X29	// next c
    
    	MULHU	X14, X6, X15	// z_hi[2] = x[2] * y
    	MUL	X14, X6, X14	// z_lo[2] = x[2] * y
    	ADD	X14, X16, X21	// z_lo[2] = x[2] * y + z[2]
    	SLTU	X14, X21, X22
    	ADD	X15, X22, X15	// z_hi[2] = x[2] * y + z[2]
    	ADD	X21, X29, X16	// z_lo[2] = x[2] * y + z[2] + c
    	SLTU	X21, X16, X22
    	ADD	X15, X22, X29	// next c
    
    	MULHU	X17, X6, X18	// z_hi[3] = x[3] * y
    	MUL	X17, X6, X17	// z_lo[3] = x[3] * y
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Nov 09 13:57:06 UTC 2023
    - 2.2K bytes
    - Viewed (0)
  9. src/crypto/internal/edwards25519/scalar_fiat.go

    	x12, x11 = bits.Mul64(x4, arg2[0])
    	var x13 uint64
    	var x14 uint64
    	x13, x14 = bits.Add64(x12, x9, uint64(0x0))
    	var x15 uint64
    	var x16 uint64
    	x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
    	var x17 uint64
    	var x18 uint64
    	x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
    	x19 := (uint64(fiatScalarUint1(x18)) + x6)
    	var x20 uint64
    	_, x20 = bits.Mul64(x11, 0xd2b51da312547e1b)
    	var x22 uint64
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Nov 10 18:45:00 UTC 2022
    - 35.6K bytes
    - Viewed (0)
  10. test/codegen/memops.go

    	if x8[i+1] < x8[0] {
    		return 0
    	}
    	// amd64: `MOVWLZX\t2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*2\), [A-Z]+[0-9]*`
    	if x16[i+1] < x16[0] {
    		return 0
    	}
    	// amd64: `MOVWLZX\t2\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[12]\), [A-Z]+[0-9]*`
    	if x16[16*i+1] < x16[0] {
    		return 0
    	}
    	// amd64: `MOVL\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*`
    	if x32[i+1] < x32[0] {
    		return 0
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Aug 04 16:40:24 UTC 2023
    - 12.5K bytes
    - Viewed (0)
Back to top