- Sort Score
- Results per page: 10
- Languages All
Results 51 - 60 of 2,060 for spill (0.24 sec)
-
src/cmd/compile/internal/walk/assign.go
nodes.Append(nif) // Index to start copying into s. // idx = newLen - len(l2) // We use this expression instead of oldLen because it avoids // a spill/restore of oldLen. // Note: this doesn't work optimally currently because // the compiler optimizer undoes this arithmetic. idx := ir.NewBinaryExpr(base.Pos, ir.OSUB, newLen, ir.NewUnaryExpr(base.Pos, ir.OLEN, l2))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:09:06 UTC 2024 - 20.3K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
// STP.P (ZR,ZR), 16(R16) // CMP Rarg1, R16 // BLE -2(PC) // Note: the-end-of-the-memory may not be a valid pointer; it's a problem if it is spilled. // the-end-of-the-memory - 16 is within the area to zero, ok to spill. { name: "LoweredZero", argLength: 3, reg: regInfo{ inputs: []regMask{buildReg("R16"), gp}, clobbers: buildReg("R16"), }, clobberFlags: true, faultOnNilArg0: true,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 15:49:20 UTC 2024 - 58.8K bytes - Viewed (0) -
src/runtime/syscall_windows_test.go
return uintptr(i1.x + i1.y + i2.x + i2.y + i3.x + i3.y + i4.x + i4.y + i5.x + i5.y) } // This test forces a GC. The idea is to have enough arguments // that insufficient spill slots allocated (according to the ABI) // may cause compiler-generated spills to clobber the return PC. // Then, the GC stack scanning will catch that. // //go:registerparams func sum9andGC(i1, i2, i3, i4, i5, i6, i7, i8, i9 uint32) uintptr { runtime.GC()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Aug 31 16:31:35 UTC 2023 - 32.5K bytes - Viewed (0) -
doc/go_mem.html
could discard <code>i</code> without saving a copy and then reload <code>i = *p</code> just before <code>funcs[i]()</code>. A Go compiler must not, because the value of <code>*p</code> may have changed. (Instead, the compiler could spill <code>i</code> to the stack.) </p> <p> Not allowing a single write to write multiple values also means not using the memory where a local variable will be written as temporary storage before the write.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 15:54:42 UTC 2024 - 26.6K bytes - Viewed (0) -
src/runtime/asm_amd64.s
goodm: MOVQ R14, AX // AX (and arg 0) = g MOVQ SI, R14 // g = g.m.g0 get_tls(CX) // Set G in TLS MOVQ R14, g(CX) MOVQ (g_sched+gobuf_sp)(R14), SP // sp = g0.sched.sp PUSHQ AX // open up space for fn's arg spill slot MOVQ 0(DX), R12 CALL R12 // fn(g) // The Windows native stack unwinder incorrectly classifies the next instruction // as part of the function epilogue, producing a wrong call stack.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 11 20:38:24 UTC 2024 - 60.4K bytes - Viewed (0) -
src/cmd/internal/obj/riscv/obj.go
if framesize == 0 { return p } if ctxt.Flag_maymorestack != "" { // Save LR and REGCTXT const frameSize = 16 p = ctxt.StartUnsafePoint(p, newprog) // Spill Arguments. This has to happen before we open // any more frame space. p = cursym.Func().SpillRegisterArgs(p, newprog) // MOV LR, -16(SP) p = obj.Appendp(p, newprog) p.As = AMOV
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sun Apr 07 03:32:27 UTC 2024 - 77K bytes - Viewed (0) -
src/reflect/type.go
abid = newAbiDesc(t, rcvr) // build dummy rtype holding gc program x := &abi.Type{ Align_: goarch.PtrSize, // Don't add spill space here; it's only necessary in // reflectcall's frame, not in the allocated frame. // TODO(mknyszek): Remove this comment when register // spill space in the frame is no longer required. Size_: align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize),
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 85.5K bytes - Viewed (0) -
src/runtime/asm_386.s
// MOVL DX, 4(DI) // dowrite: // MOVL AX, 88(CX) TEXT gcWriteBarrier<>(SB),NOSPLIT,$28 // Save the registers clobbered by the fast path. This is slightly // faster than having the caller spill these. MOVL CX, 20(SP) MOVL BX, 24(SP) retry: // TODO: Consider passing g.m.p in as an argument so they can be shared // across a sequence of write barriers. get_tls(BX) MOVL g(BX), BX
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 15 15:45:13 UTC 2024 - 43.1K bytes - Viewed (0) -
src/runtime/asm_ppc64x.s
// We don't save all registers on ppc64 because it takes too much space. MOVD R20, (FIXED_FRAME+0)(R1) MOVD R21, (FIXED_FRAME+8)(R1) // R0 is always 0, so no need to spill. // R1 is SP. // R2 is SB. MOVD R3, (FIXED_FRAME+16)(R1) MOVD R4, (FIXED_FRAME+24)(R1) MOVD R5, (FIXED_FRAME+32)(R1) MOVD R6, (FIXED_FRAME+40)(R1) MOVD R7, (FIXED_FRAME+48)(R1)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 18:17:17 UTC 2024 - 45.4K bytes - Viewed (0) -
src/runtime/traceback.go
if !isLive(off, slotIdx) { print("?") } } start := true printcomma := func() { if !start { print(", ") } } pi := 0 slotIdx := uint8(0) // register arg spill slot index printloop: for { o := p[pi] pi++ switch o { case abi.TraceArgsEndSeq: break printloop case abi.TraceArgsStartAgg: printcomma() print("{") start = true
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 55.1K bytes - Viewed (0)