- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 12 for MemoryArg (0.22 sec)
-
src/cmd/compile/internal/ssa/tighten.go
for i := 0; i < len(b.Values); i++ { v := b.Values[i] t := target[v.ID] if t == nil || t == b { // v is not moveable, or is already in correct place. continue } if mem := v.MemoryArg(); mem != nil { if startMem[t.ID] != mem { // We can't move a value with a memory arg unless the target block // has that memory arg as its starting memory. continue } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 16 01:01:38 UTC 2023 - 7.7K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/deadstore.go
// operation not covered above then we probably need to keep it. // We also need to keep autos if they reach Phis (issue #26153). if v.Type.IsMemory() || v.Type.IsFlags() || v.Op == OpPhi || v.MemoryArg() != nil { for _, a := range args { if n, ok := addr[a]; ok { if !used.Has(n) { used.Add(n) changed = true } } } return }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 25 20:07:26 UTC 2024 - 11K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/schedule.go
if v.Op != OpPhi && v.Op != OpInitMem && v.Type.IsMemory() { nextMem[v.MemoryArg().ID] = v } } // Add edges to enforce that any load must come before the following store. for _, v := range b.Values { if v.Op == OpPhi || v.Type.IsMemory() { continue } w := v.MemoryArg() if w == nil { continue } if s := nextMem[w.ID]; s != nil && s.Block == b {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 08 15:53:17 UTC 2024 - 16.4K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/writebarrier.go
return false // writes into the stack don't need write barrier } // If we're writing to a place that might have heap pointers, we need // the write barrier. if mightContainHeapPointer(dst, t.Size(), v.MemoryArg(), zeroes) { return true } // Lastly, check if the values we're writing might be heap pointers. // If they aren't, we don't need a write barrier. switch v.Op { case OpStore:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 08 19:09:14 UTC 2023 - 23.5K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/value.go
} return reg.(*Register).name } // MemoryArg returns the memory argument for the Value. // The returned value, if non-nil, will be memory-typed (or a tuple with a memory-typed second part). // Otherwise, nil is returned. func (v *Value) MemoryArg() *Value { if v.Op == OpPhi { v.Fatalf("MemoryArg on Phi") } na := len(v.Args) if na == 0 { return nil }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 03 16:40:22 UTC 2024 - 16.7K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/expand_calls.go
} var rc registerCursor var result *[]*Value if len(aRegs) > 0 { result = &allResults } else { if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr { addr := a.Args[0] if addr.MemoryArg() == a.MemoryArg() && addr.Aux == aux.NameOfResult(i) { continue // Self move to output parameter } } } rc.init(aRegs, aux.abiInfo, result, auxBase, auxOffset)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 28 05:13:40 UTC 2023 - 31.9K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/flagalloc.go
f.Fatalf("phi of flags not supported: %s", v.LongString()) } // If v will be spilled, and v uses memory, then we must split it // into a load + a flag generator. if spill[v.ID] && v.MemoryArg() != nil { remove = append(remove, v) if !f.Config.splitLoad(v) { f.Fatalf("can't split flag generator: %s", v.LongString()) } } // Make sure any flag arg of v is in the flags register.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Oct 31 21:41:20 UTC 2022 - 6.7K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/check.go
// ops that generate memory values. ss.clear() for _, v := range b.Values { if v.Op == OpPhi || !v.Type.IsMemory() { continue } if m := v.MemoryArg(); m != nil { ss.add(m.ID) } } // There should be at most one remaining unoverwritten memory value. for _, v := range b.Values { if !v.Type.IsMemory() { continue }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 09 16:41:23 UTC 2024 - 17.6K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/branchelim.go
// don't fuse memory ops, Phi ops, divides (can panic), // or anything else with side-effects for _, v := range b.Values { if v.Op == OpPhi || isDivMod(v.Op) || isPtrArithmetic(v.Op) || v.Type.IsMemory() || v.MemoryArg() != nil || opcodeTable[v.Op].hasSideEffects { return false } } return true } func isDivMod(op Op) bool { switch op { case OpDiv8, OpDiv8u, OpDiv16, OpDiv16u,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Nov 30 17:46:51 UTC 2022 - 12.7K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/memcombine.go
var order []*Value for _, b := range f.Blocks { // Mark all stores which are not last in a store sequence. mark.clear() for _, v := range b.Values { if v.Op == OpStore { mark.add(v.MemoryArg().ID) } } // pick an order for visiting stores such that // later stores come earlier in the ordering. order = order[:0] for _, v := range b.Values { if v.Op != OpStore {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Mar 21 19:45:41 UTC 2024 - 18.4K bytes - Viewed (0)