- Sort by: Score
- Results per page: 10
- Languages All
Results 1 - 10 of 26 for NumBlocks (0.3 sec)
-
src/cmd/compile/internal/ssa/layout.go
func layoutOrder(f *Func) []*Block { order := make([]*Block, 0, f.NumBlocks()) scheduled := f.Cache.allocBoolSlice(f.NumBlocks()) defer f.Cache.freeBoolSlice(scheduled) idToBlock := f.Cache.allocBlockSlice(f.NumBlocks()) defer f.Cache.freeBlockSlice(idToBlock) indegree := f.Cache.allocIntSlice(f.NumBlocks()) defer f.Cache.freeIntSlice(indegree) posdegree := f.newSparseSet(f.NumBlocks()) // blocks with positive remaining degree
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Oct 31 21:41:20 UTC 2022 - 5K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/likelyadjust.go
// is less likely. It's possible to assign a negative // unlikeliness (though not currently the case). certain := f.Cache.allocInt8Slice(f.NumBlocks()) // In the long run, all outcomes are at least this bad. Mainly for Exit defer f.Cache.freeInt8Slice(certain) local := f.Cache.allocInt8Slice(f.NumBlocks()) // for our immediate predecessors. defer f.Cache.freeInt8Slice(local) po := f.postorder() nest := f.loopnest() b2l := nest.b2l
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Oct 31 21:41:20 UTC 2022 - 15.4K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/dom.go
func dominatorsSimple(f *Func) []*Block { // A simple algorithm for now // Cooper, Harvey, Kennedy idom := make([]*Block, f.NumBlocks()) // Compute postorder walk post := f.postorder() // Make map from block id to order index (for intersect call) postnum := f.Cache.allocIntSlice(f.NumBlocks()) defer f.Cache.freeIntSlice(postnum) for i, b := range post { postnum[b.ID] = i }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat Dec 03 17:08:51 UTC 2022 - 7.4K bytes - Viewed (0) -
src/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
// not to generate any more output after the buffer is drained. numBlocks := (uint64(len(src)) + blockSize - 1) / blockSize if s.overflow || uint64(s.counter)+numBlocks > 1<<32 { panic("chacha20: counter overflow") } else if uint64(s.counter)+numBlocks == 1<<32 { s.overflow = true } // xorKeyStreamBlocks implementations expect input lengths that are a
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Oct 26 00:11:50 UTC 2022 - 13.9K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/lca.go
depth int32 // depth in dominator tree (root=0, its children=1, etc.) } func makeLCArange(f *Func) *lcaRange { dom := f.Idom() // Build tree blocks := make([]lcaRangeBlock, f.NumBlocks()) for _, b := range f.Blocks { blocks[b.ID].b = b if dom[b.ID] == nil { continue // entry or unreachable } parent := dom[b.ID].ID blocks[b.ID].parent = parent
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Oct 30 21:52:15 UTC 2023 - 3.8K bytes - Viewed (0) -
src/cmd/compile/internal/ssagen/phi.go
break levels } } } // Allocate scratch locations. s.priq.level = s.level s.q = make([]*ssa.Block, 0, s.f.NumBlocks()) s.queued = newSparseSet(s.f.NumBlocks()) s.hasPhi = newSparseSet(s.f.NumBlocks()) s.hasDef = newSparseSet(s.f.NumBlocks()) s.placeholder = s.s.entryNewValue0(ssa.OpUnknown, types.TypeInvalid) // Generate phi ops for each variable. for n := range vartypes {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Nov 18 17:59:44 UTC 2022 - 15.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/tighten.go
canMove := f.Cache.allocBoolSlice(f.NumValues()) defer f.Cache.freeBoolSlice(canMove) // Compute the memory states of each block. startMem := f.Cache.allocValueSlice(f.NumBlocks()) defer f.Cache.freeValueSlice(startMem) endMem := f.Cache.allocValueSlice(f.NumBlocks()) defer f.Cache.freeValueSlice(endMem) memState(f, startMem, endMem) for _, b := range f.Blocks { for _, v := range b.Values { if v.Op.isLoweredGetClosurePtr() {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 16 01:01:38 UTC 2023 - 7.7K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/flagalloc.go
// Compute the in-register flag value we want at the end of // each block. This is basically a best-effort live variable // analysis, so it can be much simpler than a full analysis. end := f.Cache.allocValueSlice(f.NumBlocks()) defer f.Cache.freeValueSlice(end) po := f.postorder() for n := 0; n < 2; n++ { for _, b := range po { // Walk values backwards to figure out what flag // value we want in the flag register at the start
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Oct 31 21:41:20 UTC 2022 - 6.7K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/sparsetree.go
type SparseTree []SparseTreeNode // newSparseTree creates a SparseTree from a block-to-parent map (array indexed by Block.ID). func newSparseTree(f *Func, parentOf []*Block) SparseTree { t := make(SparseTree, f.NumBlocks()) for _, b := range f.Blocks { n := &t[b.ID] if p := parentOf[b.ID]; p != nil { n.parent = p n.sibling = t[p.ID].child t[p.ID].child = b } } t.numberBlock(f.Entry, 1) return t }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Nov 18 17:59:44 UTC 2022 - 8.1K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/loopreschedchecks.go
if lastMems[f.Entry.ID] == nil { lastMems[f.Entry.ID] = f.Entry.NewValue0(f.Entry.Pos, OpInitMem, types.TypeMem) } memDefsAtBlockEnds := f.Cache.allocValueSlice(f.NumBlocks()) // For each block, the mem def seen at its bottom. Could be from earlier block. defer f.Cache.freeValueSlice(memDefsAtBlockEnds) // Propagate last mem definitions forward through successor blocks.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Aug 22 21:17:10 UTC 2023 - 16K bytes - Viewed (0)