- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 4 of 4 for allocToCache (0.14 sec)
-
src/runtime/mpagecache.go
*c = pageCache{} } // allocToCache acquires a pageCachePages-aligned chunk of free pages which // may not be contiguous, and returns a pageCache structure which owns the // chunk. // // p.mheapLock must be held. // // Must run on the system stack because p.mheapLock must be held. // //go:systemstack func (p *pageAlloc) allocToCache() pageCache { assertLockHeld(p.mheapLock)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Apr 19 14:30:00 UTC 2023 - 5.6K bytes - Viewed (0) -
src/runtime/mpagecache_test.go
v := v t.Run(name, func(t *testing.T) { b := NewPageAlloc(v.beforeAlloc, v.beforeScav) defer FreePageAlloc(b) for _, expect := range v.hits { checkPageCache(t, b.AllocToCache(), expect) if t.Failed() { return } } want := NewPageAlloc(v.afterAlloc, v.afterScav) defer FreePageAlloc(want) checkPageAlloc(t, want, b) }) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Dec 06 19:16:48 UTC 2021 - 10.8K bytes - Viewed (0) -
src/runtime/export_test.go
unlock(pp.mheapLock) }) return addr, scav } func (p *PageAlloc) AllocToCache() PageCache { pp := (*pageAlloc)(p) var c PageCache systemstack(func() { // None of the tests need any higher-level locking, so we just // take the lock internally. lock(pp.mheapLock) c = PageCache(pp.allocToCache()) unlock(pp.mheapLock) }) return c }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:50:53 UTC 2024 - 46.1K bytes - Viewed (0) -
src/runtime/mheap.go
pp := gp.m.p.ptr() if !needPhysPageAlign && pp != nil && npages < pageCachePages/4 { c := &pp.pcache // If the cache is empty, refill it. if c.empty() { lock(&h.lock) *c = h.pages.allocToCache() unlock(&h.lock) } // Try to allocate from the cache. base, scav = c.alloc(npages) if base != 0 { s = h.tryAllocMSpan() if s != nil { goto HaveSpan }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0)