Results 1 - 10 of 36 for mheap (0.04 sec)
- src/runtime/mheap.go

        // OpenBSD.
        physPageAlignedStacks = GOOS == "openbsd"
    )

    // Main malloc heap.
    // The heap itself is the "free" and "scav" treaps,
    // but all the other global data is here too.
    //
    // mheap must not be heap-allocated because it contains mSpanLists,
    // which must not be heap-allocated.
    type mheap struct {
        _ sys.NotInHeap

        // lock must only be acquired on the system stack, otherwise a g
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0)
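The doc comment above pins down an invariant: mheap contains mSpanLists, so it must never itself be heap-allocated, which is why the runtime keeps it in a global. A minimal sketch of that pattern, with hypothetical span, spanList, and heapState types standing in for the runtime's mspan, mSpanList, and mheap_ (an illustration of the idea, not the runtime's code):

    package main

    import "fmt"

    // span is a hypothetical stand-in for the runtime's mspan.
    type span struct {
        id   int
        next *span // intrusive link, like the runtime's span lists
    }

    // spanList sketches an intrusive singly linked list, like mSpanList.
    type spanList struct {
        first *span
    }

    // push links s onto the front of the list; no allocation happens here.
    func (l *spanList) push(s *span) {
        s.next = l.first
        l.first = s
    }

    // Like the runtime's mheap_, the list owner lives in static (global)
    // memory, never on the Go heap.
    var heapState struct {
        free spanList
    }

    func main() {
        heapState.free.push(&span{id: 1})
        heapState.free.push(&span{id: 2})
        for s := heapState.free.first; s != nil; s = s.next {
            fmt.Println("span", s.id)
        }
    }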
- src/runtime/malloc.go

        a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
        if a != nil {
            mheap_.arena.init(uintptr(a), size, false)
            p = mheap_.arena.end // For hint below
            break
        }
    }
    hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
    hint.addr = p
    hint.next, mheap_.arenaHints = mheap_.arenaHints, hint

    // Place the hint for user arenas just after the large reservation.
    //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0)
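The last assignment in the snippet, hint.next, mheap_.arenaHints = mheap_.arenaHints, hint, pushes the new hint onto the front of a singly linked list in one tuple assignment. A small sketch of the same idiom, with a hypothetical node type standing in for arenaHint:

    package main

    import "fmt"

    // node is a hypothetical stand-in for runtime.arenaHint.
    type node struct {
        addr uint64
        next *node
    }

    func main() {
        var hints *node // plays the role of mheap_.arenaHints

        for _, a := range []uint64{0xc000000000, 0xc100000000} {
            h := &node{addr: a}
            // One tuple assignment links h in front of the current head,
            // exactly the shape of the snippet's hint.next, ... = ..., hint.
            h.next, hints = hints, h
        }

        for h := hints; h != nil; h = h.next {
            fmt.Printf("hint at %#x\n", h.addr)
        }
    }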
- src/runtime/mgcscavenge.go

    // the heap goal is defined in terms of bytes of objects, rather than pages like
    // RSS. As a result, we need to account for fragmentation internal to
    // spans. heapGoal / lastHeapGoal defines the ratio between the current heap goal
    // and the last heap goal, which tells us by how much the heap is growing or
    // shrinking. We estimate what the heap will grow to in terms of pages by taking
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes - Viewed (0)
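The truncated comment describes estimating future page use from the ratio of successive heap goals. A sketch of that kind of estimate, assuming the simple scaling formula below; the real pacing in mgcscavenge.go has more inputs, and estimatePages is an illustrative name:

    package main

    import "fmt"

    // estimatePages sketches the growth estimate described above: scale the
    // current page count by the ratio of the new heap goal to the last one,
    // so per-span fragmentation is assumed to grow with the heap.
    func estimatePages(inUsePages, heapGoal, lastHeapGoal uint64) uint64 {
        return inUsePages * heapGoal / lastHeapGoal
    }

    func main() {
        const pageSize = 8 << 10 // the runtime's 8 KiB pages
        pages := estimatePages(10000, 110<<20, 100<<20)
        fmt.Printf("estimated pages: %d (~%d MiB)\n", pages, pages*pageSize>>20)
    }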
- src/runtime/mgc.go

    mheap_.pagesSwept.Store(0)
    mheap_.sweepArenas = mheap_.allArenas
    mheap_.reclaimIndex.Store(0)
    mheap_.reclaimCredit.Store(0)
    unlock(&mheap_.lock)

    sweep.centralIndex.clear()

    if !concurrentSweep || mode == gcForceBlockMode {
        // Special case synchronous sweep.
        // Record that no proportional sweeping has to happen.
        lock(&mheap_.lock)
        mheap_.sweepPagesPerByte = 0
        unlock(&mheap_.lock)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 62K bytes - Viewed (0)
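The synchronous case records sweepPagesPerByte = 0 to disable proportional sweeping. When proportional sweeping is on, the idea is to spread the remaining sweep work evenly over the allocation runway left before the next GC; a simplified sketch of that ratio, with illustrative names:

    package main

    import "fmt"

    // sweepPagesPerByte sketches the proportional-sweep ratio: remaining pages
    // to sweep divided by the bytes of allocation runway before the next GC.
    func sweepPagesPerByte(pagesToSweep, heapLive, heapGoal uint64) float64 {
        heapDistance := int64(heapGoal) - int64(heapLive)
        if heapDistance <= 0 {
            return 0 // no runway: the snippet's synchronous case records 0
        }
        return float64(pagesToSweep) / float64(heapDistance)
    }

    func main() {
        // 5000 pages left to sweep, 40 MiB of runway until the goal.
        fmt.Printf("%.6f pages/byte\n", sweepPagesPerByte(5000, 60<<20, 100<<20))
    }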
- src/runtime/mgcpacer.go

    // is based on a steady-state scannable heap size, we assume this means our
    // heap is growing. Compute a new heap goal that takes our existing runway
    // computed for scanWorkExpected and extrapolates it to maxScanWork, the worst-case
    // scan work. This keeps our assist ratio stable if the heap continues to grow.
    //
    // The effect of this mechanism is that assists stay flat in the face of heap
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 55.4K bytes - Viewed (0)
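The extrapolation the comment describes can be checked with a few lines of arithmetic: scaling the runway by maxScanWork / scanWorkExpected leaves the assist ratio (scan work per byte of runway) unchanged. A sketch with illustrative values, not the pacer's actual fields:

    package main

    import "fmt"

    // extrapolatedRunway scales the runway computed for the expected scan work
    // up to the worst-case scan work, as the comment describes.
    func extrapolatedRunway(runway, scanWorkExpected, maxScanWork float64) float64 {
        return runway * maxScanWork / scanWorkExpected
    }

    func main() {
        runway := 64.0                  // MiB of runway computed for the expected case
        expected, worst := 100.0, 250.0 // expected vs. worst-case scan work, MiB
        newRunway := extrapolatedRunway(runway, expected, worst)
        // The assist ratio (scan work per byte of runway) is unchanged.
        fmt.Printf("before: %.4f  after: %.4f\n", expected/runway, worst/newRunway)
    }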
- src/runtime/mgcmark.go

    //
    // We're going to scan the whole heap (that was available at the time the
    // mark phase started, i.e. markArenas) for in-use spans which have specials.
    //
    // Break up the work into arenas, and further into chunks.
    //
    // Snapshot allArenas as markArenas. This snapshot is safe because allArenas
    // is append-only.
    mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 18 21:25:11 UTC 2024 - 52.5K bytes - Viewed (0)
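The snapshot line uses Go's full slice expression, s[:len(s):len(s)], which caps the result's capacity at its length. A standalone demonstration of why that protects the snapshot: any append to it is forced to reallocate rather than write into the backing array it shares with the original slice.

    package main

    import "fmt"

    func main() {
        all := make([]int, 3, 8) // spare capacity, like an append-only slice
        all[0], all[1], all[2] = 1, 2, 3

        // Full slice expression, as in the snippet: the third index caps the
        // snapshot's capacity at its length.
        snapshot := all[:len(all):len(all)]

        all = append(all, 4)          // may reuse all's spare capacity
        grown := append(snapshot, 99) // must reallocate: cap(snapshot) == len(snapshot)

        fmt.Println(snapshot, &grown[0] != &snapshot[0]) // [1 2 3] true
    }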
- src/runtime/mbitmap.go

        }
        getg().m.traceback = 2
        throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
    }

    // findObject returns the base address for the heap object containing
    // the address p, the object's span, and the index of the object in s.
    // If p does not point into a heap object, it returns base == 0.
    //
    // If p is an invalid heap pointer and debug.invalidptr != 0,
    // findObject panics.
    //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 00:18:55 UTC 2024 - 60K bytes - Viewed (0)
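findObject itself is internal to the runtime, but the contract its comment describes, mapping an interior pointer to an object's base address and index, reduces to simple arithmetic when a span holds fixed-size slots. A hedged sketch with a hypothetical objectBase helper; the runtime consults span metadata rather than taking these values as parameters:

    package main

    import "fmt"

    // objectBase: given an interior pointer p into a span that starts at
    // spanStart and holds objects of elemSize bytes, return the containing
    // object's base address and index. Mirrors the contract in findObject's
    // doc comment, not its implementation.
    func objectBase(p, spanStart, elemSize uintptr) (base, index uintptr) {
        if p < spanStart {
            return 0, 0 // not in the span: base == 0, as the comment specifies
        }
        index = (p - spanStart) / elemSize
        base = spanStart + index*elemSize
        return base, index
    }

    func main() {
        base, idx := objectBase(0x10130, 0x10000, 0x60)
        fmt.Printf("base %#x, index %d\n", base, idx) // base 0x10120, index 3
    }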
- src/runtime/mprof.go

        return true
    }

    // mProf_NextCycle publishes the next heap profile cycle and creates a
    // fresh heap profile cycle. This operation is fast and can be done
    // during STW. The caller must call mProf_Flush before calling
    // mProf_NextCycle again.
    //
    // This is called by mark termination during STW so allocations and
    // frees after the world is started again count towards a new heap
    // profiling cycle.
    func mProf_NextCycle() {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:57:37 UTC 2024 - 53.3K bytes - Viewed (0)
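A toy model of the cycle scheme the comment describes: allocations count against an active cycle, advancing the cycle is a cheap operation suitable for STW, and a separate flush publishes only completed cycles. The profile type below is purely illustrative and not the runtime's memRecord layout:

    package main

    import "fmt"

    // profile is a toy stand-in for the heap profile's cycle bookkeeping.
    type profile struct {
        active    int
        cycles    [3]int64 // per-cycle allocated bytes
        published int64    // totals visible to profile readers
    }

    // malloc charges an allocation to the active cycle.
    func (p *profile) malloc(bytes int64) { p.cycles[p.active] += bytes }

    // nextCycle is the cheap STW-safe step: just advance the active index.
    func (p *profile) nextCycle() { p.active = (p.active + 1) % len(p.cycles) }

    // flush publishes the most recently completed cycle.
    func (p *profile) flush() {
        prev := (p.active + len(p.cycles) - 1) % len(p.cycles)
        p.published += p.cycles[prev]
        p.cycles[prev] = 0
    }

    func main() {
        var p profile
        p.malloc(4096)
        p.nextCycle()  // mark termination: later events go to a fresh cycle
        p.malloc(1024) // counts toward the new cycle, not the published one
        p.flush()
        fmt.Println(p.published) // 4096
    }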
- pkg/scheduler/internal/queue/scheduling_queue.go

    inFlightEvents *list.List

    // activeQ is a heap structure that the scheduler actively looks at to find
    // pods to schedule. The head of the heap is the highest-priority pod.
    activeQ *heap.Heap

    // podBackoffQ is a heap ordered by backoff expiry. Pods which have completed
    // backoff are popped from this heap before the scheduler looks at activeQ.
    podBackoffQ *heap.Heap
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Jun 12 13:26:09 UTC 2024 - 61.4K bytes - Viewed (0)
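The two fields describe heaps with different orderings: activeQ keyed by pod priority and podBackoffQ keyed by backoff expiry, with expired pods moved into activeQ before the scheduler pops from it. A self-contained sketch using the standard library's container/heap; the pod and podHeap types here are illustrative stand-ins, not the scheduler's heap.Heap:

    package main

    import (
        "container/heap"
        "fmt"
        "time"
    )

    // pod is a hypothetical stand-in for the scheduler's queued pod info.
    type pod struct {
        name          string
        priority      int
        backoffExpiry time.Time
    }

    // podHeap orders pods with a pluggable less function, so one type can back
    // both queues, much as both fields above share one heap implementation.
    type podHeap struct {
        pods []*pod
        less func(a, b *pod) bool
    }

    func (h podHeap) Len() int           { return len(h.pods) }
    func (h podHeap) Less(i, j int) bool { return h.less(h.pods[i], h.pods[j]) }
    func (h podHeap) Swap(i, j int)      { h.pods[i], h.pods[j] = h.pods[j], h.pods[i] }
    func (h *podHeap) Push(x any)        { h.pods = append(h.pods, x.(*pod)) }
    func (h *podHeap) Pop() any {
        p := h.pods[len(h.pods)-1]
        h.pods = h.pods[:len(h.pods)-1]
        return p
    }

    func main() {
        activeQ := &podHeap{less: func(a, b *pod) bool { return a.priority > b.priority }}
        backoffQ := &podHeap{less: func(a, b *pod) bool { return a.backoffExpiry.Before(b.backoffExpiry) }}

        heap.Push(backoffQ, &pod{name: "web", priority: 10, backoffExpiry: time.Now()})
        heap.Push(activeQ, &pod{name: "db", priority: 5})

        // As the comments describe: pods whose backoff has expired are popped
        // from backoffQ into activeQ before the scheduler looks at activeQ.
        for backoffQ.Len() > 0 && !backoffQ.pods[0].backoffExpiry.After(time.Now()) {
            heap.Push(activeQ, heap.Pop(backoffQ))
        }
        fmt.Println(heap.Pop(activeQ).(*pod).name) // "web": highest priority at the head
    }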
- src/runtime/pprof/pprof_test.go

    if err != nil {
        t.Fatal(err)
    }
    heap := 1 << 30
    if runtime.GOOS == "android" {
        // Use smaller size for Android to avoid crash.
        heap = 100 << 20
    }
    if runtime.GOOS == "windows" && runtime.GOARCH == "arm" {
        // Use smaller heap for Windows/ARM to avoid crash.
        heap = 100 << 20
    }
    if testing.Short() {
        heap = 100 << 20
    }
    // This makes fork slower.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 18:42:28 UTC 2024 - 68.8K bytes - Viewed (0)
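The shifted constants in the test are byte sizes: 1 << 30 is 1 GiB, and 100 << 20 is the 100 MiB fallback used on Android, on Windows/ARM, and under -short. A one-liner to confirm the arithmetic:

    package main

    import "fmt"

    func main() {
        fmt.Println(1<<30, 100<<20)               // 1073741824 104857600
        fmt.Printf("%d MiB\n", (100<<20)/(1<<20)) // prints "100 MiB"
    }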