- Sort: Score
- Results per page: 10
- Languages: All
Results 31 - 40 of 77 for vmov (0.19 sec)
-
src/runtime/rt0_freebsd_riscv64.s
MOV $0x800000, A0 // stacksize = 8192KB MOV $_rt0_riscv64_freebsd_lib_go(SB), A1 MOV A0, 8(X2) MOV A1, 16(X2) MOV $runtime·newosproc0(SB), T0 JALR RA, T0 restore: // Restore callee-save registers, along with X1 (LR). MOV (8*3)(X2), X1 MOV (8*4)(X2), X8 MOV (8*5)(X2), X9 MOV (8*6)(X2), X18 MOV (8*7)(X2), X19 MOV (8*8)(X2), X20 MOV (8*9)(X2), X21 MOV (8*10)(X2), X22 MOV (8*11)(X2), X23
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Sep 28 03:17:13 UTC 2022 - 2.7K bytes - Viewed (0) -
src/runtime/memmove_riscv64.s
BNEZ X5, f_align f_loop_check: MOV $16, X9 BLT X12, X9, f_loop8_check MOV $32, X9 BLT X12, X9, f_loop16_check MOV $64, X9 BLT X12, X9, f_loop32_check f_loop64: MOV 0(X11), X14 MOV 8(X11), X15 MOV 16(X11), X16 MOV 24(X11), X17 MOV 32(X11), X18 MOV 40(X11), X19 MOV 48(X11), X20 MOV 56(X11), X21 MOV X14, 0(X10) MOV X15, 8(X10) MOV X16, 16(X10) MOV X17, 24(X10) MOV X18, 32(X10)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Nov 09 13:57:06 UTC 2023 - 5.5K bytes - Viewed (0) -
src/internal/runtime/syscall/asm_linux_riscv64.s
// err | A2 | part of A0 TEXT ·Syscall6<ABIInternal>(SB),NOSPLIT,$0-80 MOV A0, A7 MOV A1, A0 MOV A2, A1 MOV A3, A2 MOV A4, A3 MOV A5, A4 MOV A6, A5 ECALL MOV $-4096, T0 BLTU T0, A0, err // r1 already in A0 // r2 already in A1 MOV ZERO, A2 // errno RET err: SUB A0, ZERO, A2 // errno MOV $-1, A0 // r1 MOV ZERO, A1 // r2
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Feb 21 21:28:32 UTC 2024 - 969 bytes - Viewed (0) -
src/crypto/sha512/sha512block_riscv64.s
MOV (0*8)(X20), X5 MOV (1*8)(X20), X6 MOV (2*8)(X20), X7 MOV (3*8)(X20), X8 ADD X5, X10 // H0 = a + H0 ADD X6, X11 // H1 = b + H1 ADD X7, X12 // H2 = c + H2 ADD X8, X13 // H3 = d + H3 MOV X10, (0*8)(X20) MOV X11, (1*8)(X20) MOV X12, (2*8)(X20) MOV X13, (3*8)(X20) MOV (4*8)(X20), X5 MOV (5*8)(X20), X6 MOV (6*8)(X20), X7 MOV (7*8)(X20), X8
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Mar 07 14:57:07 UTC 2024 - 9.1K bytes - Viewed (0) -
src/runtime/sys_linux_riscv64.s
finish: MOV 8(X2), T0 // sec MOV 16(X2), T1 // nsec // restore stack MOV S2, X2 MOV 24(X2), T2 MOV T2, m_vdsoPC(S3) MOV 32(X2), T2 MOV T2, m_vdsoSP(S3) // sec is in T0, nsec in T1 // return nsec in T0 MOV $1000000000, T2 MUL T2, T0 ADD T1, T0 MOV T0, ret+0(FP) RET fallback: MOV $8(X2), A1 MOV $SYS_clock_gettime, A7 ECALL MOV 8(X2), T0 // sec
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Nov 09 13:57:06 UTC 2023 - 11.5K bytes - Viewed (0) -
src/runtime/sys_freebsd_riscv64.s
TEXT runtime·usleep(SB),NOSPLIT,$24-4 MOVWU usec+0(FP), A0 MOV $1000, A1 MUL A1, A0, A0 MOV $1000000000, A1 DIV A1, A0, A2 MOV A2, 8(X2) REM A1, A0, A3 MOV A3, 16(X2) ADD $8, X2, A0 MOV ZERO, A1 MOV $SYS_nanosleep, T0 ECALL RET // func thr_self() thread TEXT runtime·thr_self(SB),NOSPLIT,$8-8 MOV $ptr-8(SP), A0 // arg 1 &8(SP) MOV $SYS_thr_self, T0 ECALL MOV ptr-8(SP), A0
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 06 18:49:01 UTC 2023 - 8.9K bytes - Viewed (0) -
src/runtime/cgo/asm_riscv64.s
* 8(X2). */ ADD $(-8*29), X2 MOV X10, (8*1)(X2) // fn unsafe.Pointer MOV X11, (8*2)(X2) // a unsafe.Pointer MOV X13, (8*3)(X2) // ctxt uintptr MOV X8, (8*4)(X2) MOV X9, (8*5)(X2) MOV X18, (8*6)(X2) MOV X19, (8*7)(X2) MOV X20, (8*8)(X2) MOV X21, (8*9)(X2) MOV X22, (8*10)(X2) MOV X23, (8*11)(X2) MOV X24, (8*12)(X2) MOV X25, (8*13)(X2) MOV X26, (8*14)(X2) MOV g, (8*15)(X2) MOV X1, (8*16)(X2)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 12 00:43:51 UTC 2023 - 2.3K bytes - Viewed (0) -
src/runtime/memclr_riscv64.s
MOV $64, X9 BLT X11, X9, zero32 loop64: MOV ZERO, 0(X10) MOV ZERO, 8(X10) MOV ZERO, 16(X10) MOV ZERO, 24(X10) MOV ZERO, 32(X10) MOV ZERO, 40(X10) MOV ZERO, 48(X10) MOV ZERO, 56(X10) ADD $64, X10 SUB $64, X11 BGE X11, X9, loop64 BEQZ X11, done check32: MOV $32, X9 BLT X11, X9, check16 zero32: MOV ZERO, 0(X10) MOV ZERO, 8(X10) MOV ZERO, 16(X10) MOV ZERO, 24(X10)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Nov 09 13:57:06 UTC 2023 - 1.7K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/branchelim.go
// amd64 doesn't support CMOV with byte registers return false } return true default: return false } } // elimIf converts the one-way branch starting at dom in f to a conditional move if possible. // loadAddr is a set of values which are used to compute the address of a load. // Those values are exempt from CMOV generation.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Nov 30 17:46:51 UTC 2022 - 12.7K bytes - Viewed (0) -
test/codegen/condmove.go
func cmovfloatmove(x, y int) float64 { a := 1.0 if x <= y { a = 2.0 } // amd64:-"CMOV" // arm64:-"CSEL" // ppc64x:-"ISEL" // wasm:-"Select" return a } // On amd64, the following patterns trigger comparison inversion. // Test that we correctly invert the CMOV condition var gsink int64 var gusink uint64 func cmovinvert1(x, y int64) int64 { if x < gsink { y = -y
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Oct 06 20:57:33 UTC 2023 - 6.2K bytes - Viewed (0)