Results 1 - 10 of 229 for mheap (0.13 sec)
- src/runtime/mgcsweep.go

      releasedEager := mheap_.pages.scav.releasedEager.Load()
      // Print the line.
      printScavTrace(releasedBg, releasedEager, false)
      // Update the stats.
      mheap_.pages.scav.releasedBg.Add(-releasedBg)
      mheap_.pages.scav.releasedEager.Add(-releasedEager)
      unlock(&mheap_.lock)
      })
      }
      scavenger.ready()
      }
      gp.m.locks--
      return npages
      }
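  Note how the excerpt resets the released-byte counters by subtracting exactly the values it just loaded rather than storing zero. A common reason to reset a counter this way is that increments landing between the load and the reset are preserved; a minimal sketch of that snapshot-and-subtract pattern, with illustrative names rather than the runtime's:

      package main

      import (
          "fmt"
          "sync/atomic"
      )

      // released counts bytes returned to the OS since the last report.
      var released atomic.Int64

      // drain reports everything counted so far and subtracts exactly that amount,
      // so additions that race in after the Load are kept for the next report.
      func drain() int64 {
          v := released.Load()
          released.Add(-v)
          return v
      }

      func main() {
          released.Add(4096)
          fmt.Println(drain()) // 4096
          fmt.Println(drain()) // 0
      }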
  Last Modified: Wed May 08 17:52:18 UTC 2024 - 32.9K bytes
- src/runtime/arena.go

      // Each chunk must be a multiple of the heap arena size, or the heap arena size must
      // be divisible by the arena chunks. The address space for each chunk, and each
      // corresponding heapArena for that address space, are eternally reserved for use as
      // arena chunks. That is, they can never be used for the general heap. Each chunk
      // is also represented by a single mspan, and is modeled as a single large heap
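  The either-or sizing rule in the comment above can be stated as a single predicate. A minimal sketch, assuming hypothetical chunkBytes and heapArenaBytes parameters rather than the runtime's actual constants:

      // validChunkSize reports whether an arena-chunk size and the heap-arena size
      // satisfy the invariant described above: one must evenly divide the other.
      func validChunkSize(chunkBytes, heapArenaBytes uintptr) bool {
          return chunkBytes%heapArenaBytes == 0 || heapArenaBytes%chunkBytes == 0
      }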
  Last Modified: Wed May 08 17:44:56 UTC 2024 - 37.9K bytes
- src/runtime/lockrank.go

      lockRankGscan:         "gscan",
      lockRankStackpool:     "stackpool",
      lockRankStackLarge:    "stackLarge",
      lockRankHchanLeaf:     "hchanLeaf",
      lockRankWbufSpans:     "wbufSpans",
      lockRankMheap:         "mheap",
      lockRankMheapSpecial:  "mheapSpecial",
      lockRankGlobalAlloc:   "globalAlloc",
      lockRankTrace:         "trace",
      lockRankTraceStackTab: "traceStackTab",
      lockRankPanic:         "panic",
  Last Modified: Wed May 08 17:47:01 UTC 2024 - 19.9K bytes
- src/runtime/mpagealloc.go

      // time, but may take time proportional to the size of the mapped heap beyond that.
      //
      // The heap lock must not be held over this operation, since it will briefly acquire
      // the heap lock.
      //
      // Must be called on the system stack because it acquires the heap lock.
      //
      //go:systemstack
      func (p *pageAlloc) enableChunkHugePages() {
          // Grab the heap lock to turn on huge pages for new chunks and clone the current
  Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 39.2K bytes
- src/runtime/malloc_test.go

      t.FailNow()
      }
      }

      type acLink struct {
          x [1 << 20]byte
      }

      var arenaCollisionSink []*acLink

      func TestArenaCollision(t *testing.T) {
          testenv.MustHaveExec(t)
          // Test that mheap.sysAlloc handles collisions with other
          // memory mappings.
          if os.Getenv("TEST_ARENA_COLLISION") != "1" {
              cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=^TestArenaCollision$", "-test.v"))
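  TestArenaCollision above uses the re-exec test pattern: the test spawns itself as a subprocess with an environment variable set, so the body that needs a fresh process only runs in the child. A minimal sketch of that pattern, with hypothetical names (TestInChild, TEST_IN_CHILD) rather than the runtime's:

      package mypkg_test

      import (
          "os"
          "os/exec"
          "testing"
      )

      func TestInChild(t *testing.T) {
          if os.Getenv("TEST_IN_CHILD") != "1" {
              // Parent: re-run only this test in a child process with the flag set.
              cmd := exec.Command(os.Args[0], "-test.run=^TestInChild$", "-test.v")
              cmd.Env = append(os.Environ(), "TEST_IN_CHILD=1")
              out, err := cmd.CombinedOutput()
              if err != nil {
                  t.Fatalf("child process failed: %v\n%s", err, out)
              }
              return
          }
          // Child: code that needs a fresh process (e.g. a clean address space) goes here.
      }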
  Last Modified: Tue Sep 05 23:35:29 UTC 2023 - 10.6K bytes
- src/runtime/mstats.go

      // Mallocs is the cumulative count of heap objects allocated.
      // The number of live objects is Mallocs - Frees.
      Mallocs uint64

      // Frees is the cumulative count of heap objects freed.
      Frees uint64

      // Heap memory statistics.
      //
      // Interpreting the heap statistics requires some knowledge of
      // how Go organizes memory. Go divides the virtual address
      // space of the heap into "spans", which are contiguous
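  These fields are exported through the public runtime.MemStats API, so the Mallocs - Frees relationship documented above can be observed directly; a minimal example:

      package main

      import (
          "fmt"
          "runtime"
      )

      func main() {
          var m runtime.MemStats
          runtime.ReadMemStats(&m)
          // Per the documentation above: live heap objects = Mallocs - Frees.
          live := m.Mallocs - m.Frees
          fmt.Printf("allocated=%d freed=%d live=%d\n", m.Mallocs, m.Frees, live)
      }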
  Last Modified: Mon Apr 08 21:03:13 UTC 2024 - 34.2K bytes
- src/runtime/metrics.go

      a.gcCyclesForced = uint64(memstats.numforcedgc)
      systemstack(func() {
          lock(&mheap_.lock)
          a.mSpanSys = memstats.mspan_sys.load()
          a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
          a.mCacheSys = memstats.mcache_sys.load()
          a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
          unlock(&mheap_.lock)
      })
      }

      // cpuStatsAggregate represents CPU stats obtained from the runtime
  Last Modified: Mon Apr 08 21:03:13 UTC 2024 - 26K bytes
- src/runtime/mcache.go

      }

      // dummy mspan that contains no free objects.
      var emptymspan mspan

      func allocmcache() *mcache {
          var c *mcache
          systemstack(func() {
              lock(&mheap_.lock)
              c = (*mcache)(mheap_.cachealloc.alloc())
              c.flushGen.Store(mheap_.sweepgen)
              unlock(&mheap_.lock)
          })
          for i := range c.alloc {
              c.alloc[i] = &emptymspan
          }
          c.nextSample = nextSample()
          return c
      }
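  allocmcache above fills every size-class slot with the shared emptymspan sentinel instead of leaving it nil, so later lookups always see a valid span that simply has no free objects. A minimal sketch of that sentinel pattern, with illustrative names (span, emptySpan, cache) rather than the runtime's types:

      package main

      import "fmt"

      // span is an illustrative stand-in for a runtime span of objects.
      type span struct{ nfree int }

      // emptySpan is the shared sentinel: a valid span with no free objects.
      var emptySpan span

      type cache struct {
          alloc [68]*span // one slot per size class
      }

      func newCache() *cache {
          c := &cache{}
          for i := range c.alloc {
              c.alloc[i] = &emptySpan // never nil, so callers need no nil check
          }
          return c
      }

      func main() {
          c := newCache()
          fmt.Println(c.alloc[5].nfree) // 0: "no free objects", not a nil dereference
      }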
  Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 10K bytes
- src/internal/trace/gc.go

      // Keep the first window.
      goto keep
      } else {
          // Replace it with this window.
          heap.Remove(&acc.wHeap, i)
          break
      }
      }
      }
      heap.Push(&acc.wHeap, UtilWindow{time, mu})
      if len(acc.wHeap) > acc.nWorst {
          heap.Pop(&acc.wHeap)
      }
      keep:
      }
      if len(acc.wHeap) < acc.nWorst {
          // We don't have N windows yet, so keep accumulating.
          acc.bound = 1.0
      } else {
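  The accumulator above keeps only the nWorst lowest-utilization windows by pushing every candidate and popping whenever the heap grows past that bound. A self-contained sketch of the same bounded-heap idea using container/heap, with illustrative names (win, maxHeap) rather than the trace package's types:

      package main

      import (
          "container/heap"
          "fmt"
      )

      // win is an illustrative stand-in for a utilization window.
      type win struct{ util float64 }

      // maxHeap pops the window with the highest utilization first, so after
      // trimming, only the N lowest-utilization ("worst") windows remain.
      type maxHeap []win

      func (h maxHeap) Len() int           { return len(h) }
      func (h maxHeap) Less(i, j int) bool { return h[i].util > h[j].util }
      func (h maxHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
      func (h *maxHeap) Push(x any)        { *h = append(*h, x.(win)) }
      func (h *maxHeap) Pop() any {
          old := *h
          n := len(old)
          x := old[n-1]
          *h = old[:n-1]
          return x
      }

      func main() {
          const nWorst = 3
          h := &maxHeap{}
          heap.Init(h)
          for _, u := range []float64{0.9, 0.2, 0.7, 0.1, 0.5, 0.8} {
              heap.Push(h, win{u})
              if h.Len() > nWorst {
                  heap.Pop(h) // evict the least-bad window
              }
          }
          fmt.Println(*h) // the three lowest-utilization windows, in heap order
      }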
  Last Modified: Fri May 17 18:48:18 UTC 2024 - 26K bytes
- src/runtime/mgcwork.go

      // obj must point to the beginning of a heap object or an oblet.
      //
      //go:nowritebarrierrec
      func (w *gcWork) put(obj uintptr) {
          flushed := false
          wbuf := w.wbuf1
          // Record that this may acquire the wbufSpans or heap lock to
          // allocate a workbuf.
          lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
          lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
          if wbuf == nil {
              w.init()
  Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 12.9K bytes