- Sort Score
- Results per page: 10
- Languages All
Results 1 - 9 of 9 for PallocBits (0.16 sec)
-
src/runtime/mpallocbits.go
return } // pallocBits is a bitmap that tracks page allocations for at most one // palloc chunk. // // The precise representation is an implementation detail, but for the // sake of documentation, 0s are free pages and 1s are allocated pages. type pallocBits pageBits // summarize returns a packed summary of the bitmap in pallocBits. func (b *pallocBits) summarize() pallocSum { var start, most, cur uint
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 18 15:13:43 UTC 2024 - 12.5K bytes - Viewed (0) -
src/runtime/mpallocbits_test.go
} return false } return true } // makePallocBits produces an initialized PallocBits by setting // the ranges in s to 1 and the rest to zero. func makePallocBits(s []BitRange) *PallocBits { b := new(PallocBits) for _, v := range s { b.AllocRange(v.I, v.N) } return b } // Ensures that PallocBits.AllocRange works, which is a fundamental // method used for testing and initialization since it's used by
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 17 22:00:17 UTC 2020 - 13.7K bytes - Viewed (0) -
src/runtime/export_test.go
// Expose pallocBits for testing. type PallocBits pallocBits func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) { return (*pallocBits)(b).find(npages, searchIdx) } func (b *PallocBits) AllocRange(i, n uint) { (*pallocBits)(b).allocRange(i, n) } func (b *PallocBits) Free(i, n uint) { (*pallocBits)(b).free(i, n) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:50:53 UTC 2024 - 46.1K bytes - Viewed (0) -
src/runtime/mpagealloc_test.go
if gb == nil && wb == nil { continue } if (gb == nil && wb != nil) || (gb != nil && wb == nil) { t.Errorf("chunk %d nilness mismatch", i) } if !checkPallocBits(t, gb.PallocBits(), wb.PallocBits()) { t.Logf("in chunk %d (mallocBits)", i) } if !checkPallocBits(t, gb.Scavenged(), wb.Scavenged()) { t.Logf("in chunk %d (scavenged)", i) } } // TODO(mknyszek): Verify summaries too? }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Dec 06 19:16:48 UTC 2021 - 32.6K bytes - Viewed (0) -
src/runtime/mgcscavenge.go
i := int(searchIdx / 64) // Start by quickly skipping over blocks of non-free or scavenged pages. for ; i >= 0; i-- { // 1s are scavenged OR non-free => 0s are unscavenged AND free x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(minimum)) if x != ^uint64(0) { break } } if i < 0 { // Failed to find any free/unscavenged pages. return 0, 0 } // We have something in the 64-bit chunk at i, but it could
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes - Viewed (0) -
src/runtime/mpagealloc.go
// each level fits in 64 bytes (2^3 summaries * 8 bytes per summary), which is // close to the L1 cache line width on many systems. Also, a value of 3 fits 4 tree // levels perfectly into the 21-bit pallocBits summary field at the root level. // // The following equation explains how each of the constants relate: // summaryL0Bits + (summaryLevels-1)*summaryLevelBits + logPallocChunkBytes = heapAddrBits //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 39.2K bytes - Viewed (0) -
src/runtime/mgcsweep.go
} mheap_.pagesSwept.Add(int64(s.npages)) spc := s.spanclass size := s.elemsize // The allocBits indicate which unmarked objects don't need to be // processed since they were free at the end of the last GC cycle // and were not allocated since then. // If the allocBits index is >= s.freeindex and the bit // is not marked then the object remains unallocated // since the last GC.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:52:18 UTC 2024 - 32.9K bytes - Viewed (0) -
src/runtime/mheap.go
// as allocBits for newly allocated spans. // // The pointer arithmetic is done "by hand" instead of using // arrays to avoid bounds checks along critical performance // paths. // The sweep will free the old allocBits and set allocBits to the // gcmarkBits. The gcmarkBits are replaced with a fresh zeroed // out memory. allocBits *gcBits gcmarkBits *gcBits
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0) -
src/runtime/mbitmap.go
mask uint8 index uintptr } //go:nosplit func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits { bytep, mask := s.allocBits.bitp(allocBitIndex) return markBits{bytep, mask, allocBitIndex} } // refillAllocCache takes 8 bytes s.allocBits starting at whichByte // and negates them so that ctz (count trailing zeros) instructions // can be used. It then places these 8 bytes into the cached 64 bit
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 00:18:55 UTC 2024 - 60K bytes - Viewed (0)