- Sort: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 274 for SP (0.04 sec)
-
src/runtime/signal_riscv64.go
// Push the LR to stack, as we'll clobber it in order to // push the call. The function being pushed is responsible // for restoring the LR and setting the SP back. // This extra slot is known to gentraceback. sp := c.sp() - goarch.PtrSize c.set_sp(sp) *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.ra() // Set up PC and LR to pretend the function being signaled // calls targetPC at resumePC. c.set_ra(uint64(resumePC))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Oct 04 02:55:17 UTC 2023 - 2.9K bytes - Viewed (0) -
src/cmd/asm/internal/asm/testdata/riscv64error.s
MOV $8(SP), (X5) // ERROR "address load must target register" MOVB $8(SP), X5 // ERROR "unsupported address load" MOVH $8(SP), X5 // ERROR "unsupported address load" MOVW $8(SP), X5 // ERROR "unsupported address load" MOVF $8(SP), X5 // ERROR "unsupported address load" MOV $1234, 0(SP) // ERROR "constant load must target register" MOV $1234, 8(SP) // ERROR "constant load must target register"
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sun Apr 07 03:32:27 UTC 2024 - 2.8K bytes - Viewed (0) -
src/crypto/aes/gcm_amd64.s
increment(5) MOVOU T0, (8*16 + 6*16)(SP) increment(6) MOVOU T0, (8*16 + 7*16)(SP) increment(7) MOVOU (8*16 + 0*16)(SP), B0 MOVOU (8*16 + 1*16)(SP), B1 MOVOU (8*16 + 2*16)(SP), B2 MOVOU (8*16 + 3*16)(SP), B3 MOVOU (8*16 + 4*16)(SP), B4 MOVOU (8*16 + 5*16)(SP), B5 MOVOU (8*16 + 6*16)(SP), B6 MOVOU (8*16 + 7*16)(SP), B7 aesRound(1) increment(0) aesRound(2) increment(1) aesRound(3)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 23.4K bytes - Viewed (0) -
src/crypto/sha1/sha1block_386.s
// // The stack holds the intermediate word array - 16 uint32s - at 0(SP) up to 64(SP). // The saved a, b, c, d, e (R11 through R15 on amd64) are at 64(SP) up to 84(SP). // The saved limit pointer (DI on amd64) is at 84(SP). // The saved data pointer (SI on amd64) is at 88(SP). #define LOAD(index, e) \ MOVL 88(SP), SI; \ MOVL (index*4)(SI), DI; \ BSWAPL DI; \ MOVL DI, (index*4)(SP); \ ADDL DI, e #define SHUFFLE(index, e) \
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 6K bytes - Viewed (0) -
src/runtime/asm_wasm.s
TEXT runtime·morestack(SB), NOSPLIT, $0-0 // R1 = g.m MOVD g_m(g), R1 // R2 = g0 MOVD m_g0(R1), R2 // Set g->sched to context in f. NOP SP // tell vet SP changed - stop checking offsets MOVD 0(SP), g_sched+gobuf_pc(g) MOVD $8(SP), g_sched+gobuf_sp(g) // f's SP MOVD CTXT, g_sched+gobuf_ctxt(g) // Cannot grow scheduler stack (m->g0). Get g Get R2 I64Eq If
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Nov 20 21:26:51 UTC 2023 - 11.8K bytes - Viewed (0) -
src/runtime/mkpreempt.go
var l = layout{sp: "SP"} for _, reg := range regNames386 { if reg == "SP" || strings.HasPrefix(reg, "X") { continue } l.add("MOVL", reg, 4) } softfloat := "GO386_softfloat" // Save SSE state only if supported. lSSE := layout{stack: l.stack, sp: "SP"} for i := 0; i < 8; i++ { lSSE.add("MOVUPS", fmt.Sprintf("X%d", i), 16) } p("ADJSP $%d", lSSE.stack) p("NOP SP") l.save()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Nov 20 17:19:36 UTC 2023 - 15.3K bytes - Viewed (0) -
src/runtime/asan_loong64.s
// func runtime·doasanread(addr unsafe.Pointer, sz, sp, pc uintptr) TEXT runtime·doasanread(SB), NOSPLIT, $0-32 MOVV addr+0(FP), RARG0 MOVV sz+8(FP), RARG1 MOVV sp+16(FP), RARG2 MOVV pc+24(FP), RARG3 // void __asan_read_go(void *addr, uintptr_t sz, void *sp, void *pc); MOVV $__asan_read_go(SB), FARG JMP asancall<>(SB) // func runtime·doasanwrite(addr unsafe.Pointer, sz, sp, pc uintptr) TEXT runtime·doasanwrite(SB), NOSPLIT, $0-32
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Nov 22 02:20:04 UTC 2023 - 2.1K bytes - Viewed (0) -
src/image/draw/draw.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 11 17:08:05 UTC 2024 - 33.9K bytes - Viewed (0) -
src/runtime/signal_ppc64x.go
// Push the LR to stack, as we'll clobber it in order to // push the call. The function being pushed is responsible // for restoring the LR and setting the SP back. // This extra space is known to gentraceback. sp := c.sp() - sys.MinFrameSize c.set_sp(sp) *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link() // In PIC mode, we'll set up (i.e. clobber) R2 on function // entry. Save it ahead of time.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 08 15:08:04 UTC 2023 - 3.7K bytes - Viewed (0) -
src/runtime/rt0_windows_amd64.s
// Create a new thread to do the runtime initialization and return. MOVQ BX, 32(SP) // callee-saved, preserved across the CALL MOVQ SP, BX ANDQ $~15, SP // alignment as per Windows requirement MOVQ _cgo_sys_thread_create(SB), AX MOVQ $_rt0_amd64_windows_lib_go(SB), CX MOVQ $0, DX CALL AX MOVQ BX, SP MOVQ 32(SP), BX RET TEXT _rt0_amd64_windows_lib_go(SB),NOSPLIT|NOFRAME,$0 MOVQ $0, DI MOVQ $0, SI
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Jul 19 11:55:15 UTC 2023 - 1.1K bytes - Viewed (0)