Results 1 - 10 of 84 for Spill (0.06 sec)
- src/cmd/compile/internal/ssa/regalloc_test.go
			// store after call, y must be loaded from a spill location
			Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
			Valu("mem5", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem4"),
			Exit("mem5"),
		),
	)
	flagalloc(f.f)
	regalloc(f.f)
	checkFunc(f.f)
	// Spill should be moved to exit2.
	if numSpills(f.blocks["loop1"]) != 0 {
		t.Errorf("spill present from loop1")
	}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 08 19:09:14 UTC 2023 - 6.3K bytes
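To make the test's comment concrete: in Go's internal ABI a call clobbers every register, so a value that is live across a call has to be written to a stack slot (a spill) before the call and loaded back afterwards. A minimal, hedged sketch of that situation at the source level (call and use are hypothetical helpers, not part of the test):

	package main

	//go:noinline
	func call() {} // stands in for any call; in Go's ABI it clobbers all registers

	//go:noinline
	func use(x int) int { return x + 1 }

	func f(y int) int {
		// y is live across call(), so the register allocator stores y to a
		// stack slot before the call and reloads it afterwards, much like the
		// "loaded from a spill location" situation the test constructs.
		call()
		return use(y)
	}

	func main() {
		_ = f(42)
	}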
- src/reflect/abi.go
				stackPtrs.append(1)
			} else {
				stackPtrs.append(0)
			}
		} else {
			spill += goarch.PtrSize
		}
	}
	for i, arg := range t.InSlice() {
		stkStep := in.addArg(arg)
		if stkStep != nil {
			addTypeBits(stackPtrs, stkStep.stkOff, arg)
		} else {
			spill = align(spill, uintptr(arg.Align()))
			spill += arg.Size()
			for _, st := range in.stepsForValue(i) {
				if st.kind == abiStepPointer {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 07 17:08:32 UTC 2024 - 15K bytes
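The else branch above grows the spill area for a register-assigned argument: the running offset is aligned to the argument's alignment, then bumped by its size. A self-contained sketch of just that arithmetic, with hypothetical names standing in for the real abi internals:

	package main

	import "fmt"

	// argLayout is a hypothetical stand-in for a reflected argument's layout.
	type argLayout struct {
		size  uintptr
		align uintptr
	}

	// align rounds x up to a multiple of a (a must be a power of two).
	func align(x, a uintptr) uintptr {
		return (x + a - 1) &^ (a - 1)
	}

	// spillSpace mimics the accumulation in abi.go: each register-assigned
	// argument reserves an aligned slot in the spill area.
	func spillSpace(args []argLayout) uintptr {
		var spill uintptr
		for _, arg := range args {
			spill = align(spill, arg.align)
			spill += arg.size
		}
		return spill
	}

	func main() {
		// e.g. an int64, a byte, and a pointer-sized word on a 64-bit target
		fmt.Println(spillSpace([]argLayout{{8, 8}, {1, 1}, {8, 8}})) // prints 24
	}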
- test/abi/part_live_2.go
package main

import "runtime"
import "unsafe"

//go:registerparams
func F(s []int) {
	for i, x := range s {
		G(i, x)
	}
	GC()
	H(&s[0]) // It's possible that this will make the spill redundant, but there's a bug in spill slot allocation.
	G(len(s), cap(s))
	GC()
}

//go:noinline
//go:registerparams
func G(int, int) {}

//go:noinline
//go:registerparams
func H(*int) {}

//go:registerparams
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Apr 07 03:42:11 UTC 2021 - 986 bytes
- src/cmd/compile/internal/ssa/flagalloc.go
			}
			if v.Type.IsFlags() {
				flag = v
			}
		}
		for _, v := range b.ControlValues() {
			if v != flag && v.Type.IsFlags() {
				spill[v.ID] = true
			}
		}
		if v := end[b.ID]; v != nil && v != flag {
			spill[v.ID] = true
		}
	}

	// Add flag spill and recomputation where they are needed.
	var remove []*Value // values that should be checked for possible removal
	var oldSched []*Value
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Oct 31 21:41:20 UTC 2022 - 6.7K bytes
- src/cmd/compile/internal/test/memcombine_test.go
func readUint16le(b []byte) uint64 {
	y := uint64(binary.LittleEndian.Uint16(b))
	nop() // force spill
	return y
}

func readUint16be(b []byte) uint64 {
	y := uint64(binary.BigEndian.Uint16(b))
	nop() // force spill
	return y
}

func readUint32le(b []byte) uint64 {
	y := uint64(binary.LittleEndian.Uint32(b))
	nop() // force spill
	return y
}

func readUint32be(b []byte) uint64 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Nov 30 18:35:50 UTC 2023 - 4.5K bytes
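The nop() here is the usual trick for forcing a spill in such tests: a //go:noinline empty function cannot be inlined away, so any value live across the call must survive it, typically via a spill slot. A runnable sketch of the pattern (this nop is a hypothetical stand-in for the test's helper):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	//go:noinline
	func nop() {} // not inlinable, so callers must keep live values across the call

	func readUint16le(b []byte) uint64 {
		y := uint64(binary.LittleEndian.Uint16(b))
		nop() // force y to be kept (usually spilled) across a call
		return y
	}

	func main() {
		fmt.Println(readUint16le([]byte{0x34, 0x12})) // 0x1234 = 4660
	}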
- src/cmd/compile/internal/ssa/regalloc.go
		// Rematerialize instead of loading from the spill location.
		c = v.copyIntoWithXPos(s.curBlock, pos)
	} else {
		// Load v from its spill location.
		spill := s.makeSpill(v, s.curBlock)
		if s.f.pass.debug > logSpills {
			s.f.Warnl(vi.spill.Pos, "load spill for %v from %v", v, spill)
		}
		c = s.curBlock.NewValue1(pos, OpLoadReg, v.Type, spill)
	}
	s.setOrig(c, v)
	if onWasmStack {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 21 17:49:56 UTC 2023 - 87.2K bytes
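The branch above separates values that are rematerializable (cheap to recompute, e.g. constants) from values that must be reloaded from a spill slot. At the source level the difference looks roughly like this sketch (hypothetical functions; the compiler's actual decisions may vary):

	package main

	//go:noinline
	func call() {}

	//go:noinline
	func sink(x int) {}

	func g(a, b int) {
		c := 1234     // rematerializable: a constant can simply be re-created after the call
		d := a*31 + b // not rematerializable: it generally has to be spilled and reloaded
		call()
		sink(c)
		sink(d)
	}

	func main() { g(1, 2) }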
- test/fixedbugs/issue59367.go
	_ = b[1]            // bounds check
	x := *p             // load a byte
	y := uint16(x)      // zero extend to 16 bits
	b[0] = byte(y >> 8) // compute ROLW
	b[1] = byte(y)
	nop() // spill/restore ROLW
	b[0] = byte(y >> 8) // use ROLW
	b[1] = byte(y)
}

//go:noinline
func f32(p *uint8, b []byte) {
	_ = b[3] // bounds check
	x := *p  // load a byte
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 07 21:11:29 UTC 2023 - 1.7K bytes
- src/internal/trace/generation.go
		evTable: &evTable{
			pcs: make(map[uint64]frame),
		},
		batches: make(map[ThreadID][]batch),
	}
	// Process the spilled batch.
	if spill != nil {
		g.gen = spill.gen
		if err := processBatch(g, *spill.batch); err != nil {
			return nil, nil, err
		}
		spill = nil
	}
	// Read batches one at a time until we either hit EOF or
	// the next generation.
	var spillErr error
	for {
		b, gen, err := readBatch(r)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 22:14:45 UTC 2024 - 12.1K bytes
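The "spilled batch" here is a batch that was read one step past the end of the previous generation and therefore has to be carried over into the next one. A simplified, hypothetical sketch of that carry-over pattern (not the real internal/trace types):

	package main

	import (
		"errors"
		"fmt"
		"io"
	)

	// batch is a hypothetical stand-in for a parsed trace batch.
	type batch struct {
		gen  uint64
		data string
	}

	// readGeneration consumes batches for a single generation from next().
	// If it reads a batch belonging to a later generation, it returns that
	// batch as the "spill" so the caller can start the next generation with it.
	func readGeneration(spill *batch, next func() (batch, error)) (got []batch, newSpill *batch, err error) {
		var gen uint64
		if spill != nil {
			gen = spill.gen
			got = append(got, *spill)
		}
		for {
			b, err := next()
			if errors.Is(err, io.EOF) {
				return got, nil, nil
			}
			if err != nil {
				return nil, nil, err
			}
			if len(got) == 0 {
				gen = b.gen
			}
			if b.gen != gen {
				// Belongs to the next generation: spill it for the next call.
				return got, &b, nil
			}
			got = append(got, b)
		}
	}

	func main() {
		in := []batch{{1, "a"}, {1, "b"}, {2, "c"}}
		i := 0
		next := func() (batch, error) {
			if i >= len(in) {
				return batch{}, io.EOF
			}
			b := in[i]
			i++
			return b, nil
		}
		g1, spill, _ := readGeneration(nil, next)
		g2, _, _ := readGeneration(spill, next)
		fmt.Println(len(g1), len(g2)) // 2 1
	}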
- test/fixedbugs/issue20780.go
package main

type Big = [400e6]byte

func f() { // GC_ERROR "stack frame too large"
	// Note: This test relies on the fact that we currently always
	// spill function-results to the stack, even if they're so
	// large that we would normally heap allocate them. If we ever
	// improve the backend to spill temporaries to the heap, this
	// test will probably need updating to find some new way to
	// construct an overly large stack frame.
	g(h(), h())
}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sun Jan 10 08:01:49 UTC 2021 - 755 bytes
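The comment above is the point of the test: because function results are spilled to the caller's stack frame, g(h(), h()) with a 400e6-byte result type needs a frame holding two such temporaries at once, which the compiler rejects. A runnable sketch of the same shape, with Big shrunk so it actually compiles (g and h follow the test's names, but their bodies here are guesses):

	package main

	type Big = [1 << 20]byte // shrunk from 400e6 so this compiles and runs

	//go:noinline
	func h() (b Big) { return }

	//go:noinline
	func g(a, b Big) byte { return a[0] + b[0] }

	func f() byte {
		// Both results of h() are spilled into f's stack frame before the
		// call to g, so the frame holds two copies of Big at once.
		return g(h(), h())
	}

	func main() {
		_ = f()
	}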
- src/internal/trace/reader.go
)

// Reader reads a byte stream, validates it, and produces trace events.
type Reader struct {
	r           *bufio.Reader
	lastTs      Time
	gen         *generation
	spill       *spilledBatch
	spillErr    error // error from reading spill
	frontier    []*batchCursor
	cpuSamples  []cpuSample
	order       ordering
	emittedSync bool
	go121Events *oldTraceConverter
}

// NewReader creates a new trace reader.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 6.7K bytes
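internal/trace cannot be imported from outside the Go tree, but essentially the same reader is published as golang.org/x/exp/trace; assuming that package's NewReader/ReadEvent API, a typical read loop looks roughly like this:

	package main

	import (
		"errors"
		"fmt"
		"io"
		"log"
		"os"

		"golang.org/x/exp/trace"
	)

	func main() {
		f, err := os.Open("trace.out") // a trace produced by runtime/trace
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		r, err := trace.NewReader(f)
		if err != nil {
			log.Fatal(err)
		}
		n := 0
		for {
			_, err := r.ReadEvent() // events come out validated and ordered
			if errors.Is(err, io.EOF) {
				break
			}
			if err != nil {
				log.Fatal(err)
			}
			n++
		}
		fmt.Println("events:", n)
	}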