- Sort: Score
- Results per page: 10
- Languages: All
Results 21 - 30 of 219 for clobbers (0.27 sec)
-
src/cmd/compile/internal/ssa/_gen/S390XOps.go
// It saves all GP registers if necessary, // but clobbers R14 (LR) because it's a call, // and also clobbers R1 as the PLT stub does. // Returns a pointer to a write barrier buffer in R9. {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R14") | r1, outputs: []regMask{r9}}, clobberFlags: true, aux: "Int64"},
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Feb 24 00:21:13 UTC 2023 - 52.5K bytes - Viewed (0) -
src/cmd/compile/internal/test/fixedbugs_test.go
"testing" ) type T struct { x [2]int64 // field that will be clobbered. Also makes type not SSAable. p *byte // has a pointer } //go:noinline func makeT() T { return T{} } var g T var sink interface{} func TestIssue15854(t *testing.T) { for i := 0; i < 10000; i++ { if g.x[0] != 0 { t.Fatalf("g.x[0] clobbered with %x\n", g.x[0]) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Feb 06 18:07:35 UTC 2023 - 2.1K bytes - Viewed (0) -
src/runtime/cgo/gcc_mips64x.S
sdc1 $f31, 144($29) #endif // prepare SB register = pc & 0xffffffff00000000 bal 1f 1: dsrl $28, $31, 32 dsll $28, $28, 32 move $20, $4 // save R4 move $1, $6 jalr $5 // call setg_gcc (clobbers R4) jalr $20 // call fn ld $16, 8($29) ld $17, 16($29) ld $18, 24($29) ld $19, 32($29) ld $20, 40($29) ld $21, 48($29) ld $22, 56($29) ld $23, 64($29) ld $28, 72($29) ld $30, 80($29)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jan 24 22:38:02 UTC 2023 - 1.8K bytes - Viewed (0) -
src/runtime/sys_windows_386.s
// Set up tls. LEAL m_tls(CX), DI MOVL CX, g_m(DX) MOVL DX, g(DI) MOVL DI, 4(SP) CALL runtime·setldt(SB) // clobbers CX and DX // Someday the convention will be D is always cleared. CLD CALL runtime·stackcheck(SB) // clobbers AX,CX CALL runtime·mstart(SB) RET // uint32 tstart_stdcall(M *newm); TEXT runtime·tstart_stdcall(SB),NOSPLIT,$0 MOVL newm+0(FP), BX
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Sep 21 15:56:43 UTC 2023 - 6.5K bytes - Viewed (0) -
src/runtime/cgo/gcc_loong64.S
fst.d $f27, $r3, 112 fst.d $f28, $r3, 120 fst.d $f29, $r3, 128 fst.d $f30, $r3, 136 fst.d $f31, $r3, 144 move $r18, $r4 // save R4 move $r19, $r6 jirl $r1, $r5, 0 // call setg_gcc (clobbers R4) jirl $r1, $r18, 0 // call fn ld.d $r23, $r3, 8 ld.d $r24, $r3, 16 ld.d $r25, $r3, 24 ld.d $r26, $r3, 32 ld.d $r27, $r3, 40 ld.d $r28, $r3, 48 ld.d $r29, $r3, 56 ld.d $r30, $r3, 64
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Dec 05 18:57:04 UTC 2022 - 1.5K bytes - Viewed (0) -
src/runtime/cgo/gcc_riscv64.S
fsd f23, 160(sp) fsd f24, 168(sp) fsd f25, 176(sp) fsd f26, 184(sp) fsd f27, 192(sp) // a0 = *fn, a1 = *setg_gcc, a2 = *g mv s1, a0 mv s0, a1 mv a0, a2 jalr ra, s0 // call setg_gcc (clobbers x30 aka g) jalr ra, s1 // call fn ld x1, 0(sp) ld x8, 8(sp) ld x9, 16(sp) ld x18, 24(sp) ld x19, 32(sp) ld x20, 40(sp) ld x21, 48(sp) ld x22, 56(sp) ld x23, 64(sp) ld x24, 72(sp)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Dec 05 16:41:48 UTC 2022 - 1.6K bytes - Viewed (0) -
src/cmd/cgo/internal/test/stubtest_linux_ppc64le.S
.type toc_func, @function toc_func: addis 2,12,.TOC.-toc_func@ha addi 2,2,.TOC.-toc_func@l .localentry toc_func, .-toc_func mflr 0 std 0,16(1) stdu 1,-32(1) // Call a NOTOC function which clobbers R2. bl notoc_nor2_func nop // Call libc random. This should generate a TOC relative plt stub. bl random nop addi 1,1,32 ld 0,16(1) mtlr 0 blr
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 22 15:06:17 UTC 2023 - 3.7K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
// It saves all GP registers if necessary, // but clobbers R30 (LR) because it's a call. // R16 and R17 may be clobbered by linker trampoline. // Returns a pointer to a write barrier buffer in R25. {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R16 R17 R30"), outputs: []regMask{buildReg("R25")}}, clobberFlags: true, aux: "Int64"},
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 15:49:20 UTC 2024 - 58.8K bytes - Viewed (0) -
src/runtime/sys_windows_arm.s
MOVM.DB.W [R4-R11, R14], (R13) // push {r4-r11, lr} (SP-=40) SUB $(16), R13 // reserve space for parameters/retval to go call MOVW R0, R6 // Save param0 MOVW R1, R7 // Save param1 BL runtime·load_g(SB) // Clobbers R0 MOVW $0, R4 MOVW R4, 0(R13) // No saved link register. MOVW R6, 4(R13) // Move arg0 into position MOVW R7, 8(R13) // Move arg1 into position BL runtime·sigtrampgo(SB)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Sep 21 15:56:43 UTC 2023 - 7.7K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules
(CMPconst <t> [0] (Select0 z:((ADDCCconst|ANDCCconst|NEGCC|CNTLZDCC|RLDICLCC) y))) => (Select1 <t> z) // After trying to convert ANDconst to ANDCCconst above, if the CC result is not needed, try to avoid using // ANDconst which clobbers CC. (ANDconst [m] x) && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] x) // Likewise, trying converting RLDICLCC back to ANDCCconst as it is faster.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 19:59:38 UTC 2024 - 3.8K bytes - Viewed (0)