Results 1 - 7 of 7 for AllocRange (0.15 sec)
- src/runtime/mpallocbits.go
    // structure change.
    type pallocData struct {
        pallocBits
        scavenged pageBits
    }

    // allocRange sets bits [i, i+n) in the bitmap to 1 and
    // updates the scavenged bits appropriately.
    func (m *pallocData) allocRange(i, n uint) {
        // Clear the scavenged bits when we alloc the range.
        m.pallocBits.allocRange(i, n)
        m.scavenged.clearRange(i, n)
    }

    // allocAll sets every bit in the bitmap to 1 and updates
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 18 15:13:43 UTC 2024 - 12.5K bytes
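The snippet above pairs the two halves of allocation bookkeeping: setting the allocation bits for [i, i+n) and clearing the corresponding scavenged bits. A minimal, self-contained sketch of that pairing over plain []uint64 bitmaps (illustration only; the runtime's pallocBits and pageBits are fixed-size arrays updated with word-at-a-time masks):

    package main

    import "fmt"

    // pageState is a toy stand-in for pallocData: one bitmap for "allocated",
    // one for "scavenged".
    type pageState struct {
        alloc     []uint64
        scavenged []uint64
    }

    // allocRange sets alloc bits [i, i+n) and clears the same scavenged bits,
    // mirroring the structure of pallocData.allocRange above.
    func (s *pageState) allocRange(i, n uint) {
        for b := i; b < i+n; b++ {
            s.alloc[b/64] |= 1 << (b % 64)
            s.scavenged[b/64] &^= 1 << (b % 64)
        }
    }

    func main() {
        s := &pageState{alloc: make([]uint64, 1), scavenged: []uint64{^uint64(0)}}
        s.allocRange(4, 4) // bits 4-7
        fmt.Printf("alloc=%#x scavenged=%#x\n", s.alloc[0], s.scavenged[0])
        // alloc=0xf0 scavenged=0xffffffffffffff0f
    }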
- src/runtime/mpallocbits_test.go
    // the ranges in s to 1 and the rest to zero.
    func makePallocBits(s []BitRange) *PallocBits {
        b := new(PallocBits)
        for _, v := range s {
            b.AllocRange(v.I, v.N)
        }
        return b
    }

    // Ensures that PallocBits.AllocRange works, which is a fundamental
    // method used for testing and initialization since it's used by
    // makePallocBits.
    func TestPallocBitsAllocRange(t *testing.T) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 17 22:00:17 UTC 2020 - 13.7K bytes
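makePallocBits is the test-side constructor: it replays a slice of BitRange values (start index I, length N) onto a fresh PallocBits via AllocRange, and TestPallocBitsAllocRange then exercises that path. A rough sketch of what such a table-driven test looks like for a stand-alone bit-setting helper (hypothetical names and cases; not the runtime's actual test):

    package bitmap

    import "testing"

    // allocRange sets bits [i, i+n) in a single 64-bit word. A stand-in for
    // PallocBits.AllocRange, small enough to test in isolation.
    func allocRange(bits uint64, i, n uint) uint64 {
        for b := i; b < i+n; b++ {
            bits |= 1 << b
        }
        return bits
    }

    // TestAllocRange mirrors the shape of TestPallocBitsAllocRange: a table of
    // ranges and the expected resulting word.
    func TestAllocRange(t *testing.T) {
        cases := []struct {
            name string
            i, n uint
            want uint64
        }{
            {"Low", 0, 3, 0x7},
            {"Middle", 10, 2, 0xc00},
            {"High", 62, 2, 0xc000000000000000},
        }
        for _, c := range cases {
            if got := allocRange(0, c.i, c.n); got != c.want {
                t.Errorf("%s: got %#x, want %#x", c.name, got, c.want)
            }
        }
    }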
- src/runtime/mpagealloc.go
                    p.summary[l][i] = sum
                }
            }
        }
    }

    // allocRange marks the range of memory [base, base+npages*pageSize) as
    // allocated. It also updates the summaries to reflect the newly-updated
    // bitmap.
    //
    // Returns the amount of scavenged memory in bytes present in the
    // allocated range.
    //
    // p.mheapLock must be held.
    func (p *pageAlloc) allocRange(base, npages uintptr) uintptr {
        assertLockHeld(p.mheapLock)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 39.2K bytes
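Unlike the per-chunk bitmap method, pageAlloc.allocRange works in address space: it takes a base address and a page count and marks [base, base+npages*pageSize) as allocated. A tiny sketch of that arithmetic, assuming the runtime's 8 KiB logical page size and using uint64 in place of uintptr:

    package main

    import "fmt"

    // pageSize is assumed to be 8 KiB here, matching the runtime's logical
    // page size; the real code uses the pageSize constant and uintptr.
    const pageSize = 8192

    // rangeBounds returns the half-open address range covered by npages
    // pages starting at base, i.e. [base, base+npages*pageSize).
    func rangeBounds(base, npages uint64) (lo, hi uint64) {
        return base, base + npages*pageSize
    }

    func main() {
        lo, hi := rangeBounds(0xc000000000, 4)
        fmt.Printf("marking [%#x, %#x) as allocated\n", lo, hi)
        // marking [0xc000000000, 0xc000008000) as allocated
    }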
- src/runtime/mgcscavenge_test.go
    func makePallocData(alloc, scavenged []BitRange) *PallocData {
        b := new(PallocData)
        for _, v := range alloc {
            if v.N == 0 {
                // Skip N==0. It's harmless and allocRange doesn't
                // handle this case.
                continue
            }
            b.AllocRange(v.I, v.N)
        }
        for _, v := range scavenged {
            if v.N == 0 {
                // See the previous loop.
                continue
            }
            b.ScavengedSetRange(v.I, v.N)
        }
        return b
    }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 25.2K bytes
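Both loops above skip zero-length ranges before touching the bitmaps, because allocRange assumes n > 0 and a zero-length range is a no-op anyway. The same guard, pulled out into a stand-alone helper (hypothetical; the real test simply continues, as shown):

    package main

    import "fmt"

    type bitRange struct{ i, n uint }

    // applyRanges mirrors makePallocData's loop: zero-length ranges are
    // skipped up front, so the underlying bit-setting never sees n == 0.
    func applyRanges(bits []uint64, ranges []bitRange) {
        for _, r := range ranges {
            if r.n == 0 {
                continue // same guard as the N == 0 check above
            }
            for b := r.i; b < r.i+r.n; b++ {
                bits[b/64] |= 1 << (b % 64)
            }
        }
    }

    func main() {
        bits := make([]uint64, 1)
        // The zero-length range is ignored; the result is the same as
        // applying only {4, 2}.
        applyRanges(bits, []bitRange{{4, 2}, {20, 0}})
        fmt.Printf("%#x\n", bits[0]) // 0x30
    }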
- src/runtime/export_test.go
    // Apply alloc state.
    for _, s := range init {
        // Ignore the case of s.N == 0. allocRange doesn't handle
        // it and it's a no-op anyway.
        if s.N != 0 {
            chunk.allocRange(s.I, s.N)

            // Make sure the scavenge index is updated.
            p.scav.index.alloc(ci, s.N)
        }
    }

    // Update heap metadata for the allocRange calls above.
    systemstack(func() {
        lock(p.mheapLock)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:50:53 UTC 2024 - 46.1K bytes
- src/runtime/mgcscavenge.go
    // we don't want any allocating goroutines to grab it while
    // the scavenging is in progress. Be careful here -- just do the
    // bare minimum to avoid stepping on our own scavenging stats.
    p.chunkOf(ci).allocRange(base, npages)
    p.update(addr, uintptr(npages), true, true)

    // With that done, it's safe to unlock.
    unlock(p.mheapLock)

    if !p.test {
        // Only perform sys* operations if we're not in a test.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes
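The pattern here is deliberate: hold mheapLock only long enough to update the allocation bitmap and summaries, then release it before doing any expensive sys* work so that allocating goroutines are not blocked. A generic sketch of that lock-then-release shape, with sync.Mutex standing in for the heap lock and a placeholder for the system calls:

    package main

    import (
        "fmt"
        "sync"
    )

    var (
        heapLock sync.Mutex // stands in for p.mheapLock
        reserved int        // stands in for the bitmap/summary bookkeeping
    )

    // reserveThenRelease updates shared bookkeeping under the lock and only
    // then does the slow work unlocked, mirroring allocRange + update under
    // the lock followed by sys* calls after unlock.
    func reserveThenRelease(npages int) {
        heapLock.Lock()
        reserved += npages // bare-minimum state change while locked
        heapLock.Unlock()

        // Slow path runs with the lock released; in the runtime this is
        // where the sys* operations on the reserved range happen.
        fmt.Println("releasing", npages, "pages back to the OS")
    }

    func main() {
        reserveThenRelease(4)
    }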
- src/runtime/mheap.go
            }
            base, _ = h.pages.find(npages + extraPages)
            if base == 0 {
                throw("grew heap, but no adequate free space found")
            }
        }
        base = alignUp(base, physPageSize)
        scav = h.pages.allocRange(base, npages)
    }

    if base == 0 {
        // Try to acquire a base address.
        base, scav = h.pages.alloc(npages)
        if base == 0 {
            var ok bool
            growth, ok = h.grow(npages)
            if !ok {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes
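Before the range is handed to allocRange, the base address is rounded up to a physical page boundary. alignUp for a power-of-two alignment is the usual add-then-mask idiom; a self-contained version (assuming 4 KiB physical pages for the example):

    package main

    import "fmt"

    // alignUp rounds n up to the next multiple of a, where a must be a
    // power of two. This is the standard add-then-mask alignment idiom.
    func alignUp(n, a uintptr) uintptr {
        return (n + a - 1) &^ (a - 1)
    }

    func main() {
        const physPageSize = 4096 // assumed 4 KiB physical page size
        fmt.Printf("%#x\n", alignUp(0x3a10, physPageSize)) // 0x4000
    }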