Results 1 - 10 of 30 for heapUp (0.2 sec)
src/runtime/mgc.go
	// when stopping began (just before trying to stop Ps) and just after the
	// world started again.
	pauseNS int64

	// debug.gctrace heap sizes for this cycle.
	heap0, heap1, heap2 uint64

	// Cumulative estimated CPU usage.
	cpuStats
}

// GC runs a garbage collection and blocks the caller until the
// garbage collection is complete. It may also block the entire
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 62K bytes - Viewed (0)
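The GC function this comment introduces is exposed to programs as runtime.GC, which likewise blocks until the collection completes. A minimal sketch of forcing a collection and reading the resulting heap statistics (the allocation sizes are arbitrary):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Allocate something so the collector has work to do.
	data := make([][]byte, 0, 1024)
	for i := 0; i < 1024; i++ {
		data = append(data, make([]byte, 1<<10))
	}
	_ = data

	// runtime.GC blocks the caller until the collection is complete.
	runtime.GC()

	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	fmt.Printf("heap live after GC: %d bytes, pauses: %d ns total\n",
		ms.HeapAlloc, ms.PauseTotalNs)
}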
src/runtime/mgcpacer.go
}

// For small heaps, set the max trigger point at maxTriggerRatio of the way
// from the live heap to the heap goal. This ensures we always have *some*
// headroom when the GC actually starts. For larger heaps, set the max trigger
// point at the goal, minus the minimum heap size.
//
// This choice follows from the fact that the minimum heap size is chosen
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 55.4K bytes - Viewed (0)
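A sketch of that rule, not the runtime's actual code: the names and the maxTriggerRatio value below are illustrative. Taking the larger of the two bounds reproduces the behavior the comment describes, since goal minus the minimum heap size is small (or would underflow) for small heaps and dominates for large ones:

package main

import "fmt"

// Assumed value, purely for illustration.
const maxTriggerRatio = 0.95

// maxTriggerPoint sketches the rule described above.
func maxTriggerPoint(liveHeap, heapGoal, minHeapSize uint64) uint64 {
	// maxTriggerRatio of the way from the live heap to the goal.
	byRatio := liveHeap + uint64(float64(heapGoal-liveHeap)*maxTriggerRatio)
	// For large heaps, the goal minus the minimum heap size wins.
	if heapGoal > minHeapSize && heapGoal-minHeapSize > byRatio {
		return heapGoal - minHeapSize
	}
	return byRatio
}

func main() {
	fmt.Println(maxTriggerPoint(4<<20, 8<<20, 4<<20)) // small heap: ratio-based bound
	fmt.Println(maxTriggerPoint(1<<30, 2<<30, 4<<20)) // large heap: goal - minimum
}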
src/runtime/malloc.go
// be very high if they were to be backed by huge pages (e.g. a few MiB makes
// a huge difference for an 8 MiB heap, but barely any difference for a 1 GiB
// heap). The benefit of huge pages is also not worth it for small heaps,
// because only a very, very small part of the metadata is used for small heaps.
//
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0)
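To make that comparison concrete (taking "a few MiB" as 4 MiB, purely an assumption):

package main

import "fmt"

func main() {
	// "A few MiB" of metadata is assumed to be 4 MiB here, only to make
	// the comparison in the comment above concrete.
	const metadata = 4 << 20

	for _, heapBytes := range []int{8 << 20, 1 << 30} {
		fmt.Printf("heap %5d MiB: metadata would be %6.2f%% of the heap\n",
			heapBytes>>20, 100*float64(metadata)/float64(heapBytes))
	}
	// Prints roughly 50% for the 8 MiB heap and 0.39% for the 1 GiB heap.
}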
src/runtime/mheap.go
// arenaHint is a hint for where to grow the heap arenas. See
// mheap_.arenaHints.
type arenaHint struct {
	_    sys.NotInHeap
	addr uintptr
	down bool
	next *arenaHint
}

// An mspan is a run of pages.
//
// When a mspan is in the heap free treap, state == mSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
// If the mspan is in the heap scav treap, then in addition to the
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0)
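The heapmap invariant quoted above, that both endpoints of a span map back to the span, can be mimicked with a toy page table. The span and toyHeap types below are illustrative only, not the runtime's real data structures:

package main

import "fmt"

// span is a toy stand-in for an mspan: a run of npages pages.
type span struct {
	startPage, npages uintptr
}

// toyHeap holds a page-number-to-span table.
type toyHeap struct {
	pageToSpan map[uintptr]*span
}

// record mirrors heapmap(s->start) == span and
// heapmap(s->start+s->npages-1) == span.
func (h *toyHeap) record(s *span) {
	h.pageToSpan[s.startPage] = s
	h.pageToSpan[s.startPage+s.npages-1] = s
}

func main() {
	h := &toyHeap{pageToSpan: map[uintptr]*span{}}
	s := &span{startPage: 100, npages: 8}
	h.record(s)
	fmt.Println(h.pageToSpan[100] == s, h.pageToSpan[107] == s) // true true
}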
src/runtime/mgcscavenge.go
// the heap goal is defined in terms of bytes of objects, rather than pages like
// RSS. As a result, we need to account for fragmentation internal to
// spans. heapGoal / lastHeapGoal defines the ratio between the current heap goal
// and the last heap goal, which tells us by how much the heap is growing and
// shrinking. We estimate what the heap will grow to in terms of pages by taking
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes - Viewed (0)
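A small sketch of that estimate, with illustrative names and values:

package main

import "fmt"

// projectedHeapPages sketches the estimate described above: scale the
// current heap, measured in pages, by heapGoal/lastHeapGoal.
func projectedHeapPages(currentPages, heapGoal, lastHeapGoal uint64) uint64 {
	ratio := float64(heapGoal) / float64(lastHeapGoal)
	return uint64(float64(currentPages) * ratio)
}

func main() {
	// A goal growing from 64 MiB to 80 MiB projects the heap to grow 1.25x.
	fmt.Println(projectedHeapPages(8192, 80<<20, 64<<20)) // 10240
}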
src/runtime/mbitmap.go
	}
	getg().m.traceback = 2
	throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}

// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
//
// If p is an invalid heap pointer and debug.invalidptr != 0,
// findObject panics.
//
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 00:18:55 UTC 2024 - 60K bytes - Viewed (0)
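The base address findObject returns amounts to rounding p down to an element boundary within its span. The sketch below fakes the span lookup with explicit spanBase and elemSize parameters, which are stand-ins for what findObject actually derives from the span:

package main

import "fmt"

// objectBase illustrates the arithmetic behind findObject's result:
// round p down to an element boundary within its span.
func objectBase(p, spanBase, elemSize uintptr) (base, index uintptr) {
	index = (p - spanBase) / elemSize
	base = spanBase + index*elemSize
	return base, index
}

func main() {
	base, idx := objectBase(0x1038, 0x1000, 32)
	fmt.Printf("base=%#x index=%d\n", base, idx) // base=0x1020 index=1
}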
src/runtime/mgcmark.go
		// to a heap-allocated defer record. Keep that heap record live.
		scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
	}
	// Retain defer records themselves.
	// Defer records might not be reachable from the G through regular heap
	// tracing because the defer linked list might weave between the stack and the heap.
	if d.heap {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 18 21:25:11 UTC 2024 - 52.5K bytes - Viewed (0)
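The d.heap flag marks defer records that live on the heap. One common way such records arise, as an illustration: a defer inside a loop cannot be open-coded, so the compiler falls back to heap-allocated records (exactly which defers get heap records is a compiler detail that may change between releases):

package main

import "os"

// A defer inside a loop cannot be open-coded, so its defer record is
// heap-allocated -- the d.heap case handled above. (Illustrative; the
// allocation strategy is a compiler implementation detail.)
func closeAll(files []*os.File) {
	for _, f := range files {
		defer f.Close() // each iteration links another defer record onto the G
	}
}

func main() { closeAll(nil) }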
src/runtime/mprof.go
	return true
}

// mProf_NextCycle publishes the next heap profile cycle and creates a
// fresh heap profile cycle. This operation is fast and can be done
// during STW. The caller must call mProf_Flush before calling
// mProf_NextCycle again.
//
// This is called by mark termination during STW so allocations and
// frees after the world is started again count towards a new heap
// profiling cycle.
func mProf_NextCycle() {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:57:37 UTC 2024 - 53.3K bytes - Viewed (0)
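From user code, heap profile cycles surface through runtime/pprof. Because cycles are published at mark termination, a common pattern is to force a collection before writing the profile so it reflects the most recently completed cycle. A small sketch (the heap.pprof filename is arbitrary):

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	f, err := os.Create("heap.pprof")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Publish the latest heap profile cycle before writing it out.
	runtime.GC()
	if err := pprof.WriteHeapProfile(f); err != nil {
		panic(err)
	}
}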
pkg/scheduler/internal/queue/scheduling_queue.go
	inFlightEvents *list.List

	// activeQ is a heap structure that the scheduler actively looks at to find pods to
	// schedule. The head of the heap is the highest priority pod.
	activeQ *heap.Heap

	// podBackoffQ is a heap ordered by backoff expiry. Pods which have completed backoff
	// are popped from this heap before the scheduler looks at activeQ.
	podBackoffQ *heap.Heap
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Jun 12 13:26:09 UTC 2024 - 61.4K bytes - Viewed (0)
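kube-scheduler uses its own internal heap package here, but the activeQ ordering (head is the highest priority pod) can be sketched with the standard library's container/heap. The podInfo type and priority values below are illustrative:

package main

import (
	"container/heap"
	"fmt"
)

// podInfo is an illustrative stand-in for the scheduler's queued pod entries.
type podInfo struct {
	name     string
	priority int32
}

// priorityQueue implements heap.Interface; the head is the highest
// priority pod, matching the activeQ ordering described above.
type priorityQueue []podInfo

func (q priorityQueue) Len() int           { return len(q) }
func (q priorityQueue) Less(i, j int) bool { return q[i].priority > q[j].priority }
func (q priorityQueue) Swap(i, j int)      { q[i], q[j] = q[j], q[i] }
func (q *priorityQueue) Push(x any)        { *q = append(*q, x.(podInfo)) }
func (q *priorityQueue) Pop() any {
	old := *q
	n := len(old)
	item := old[n-1]
	*q = old[:n-1]
	return item
}

func main() {
	q := &priorityQueue{{"low", 0}, {"high", 100}, {"mid", 50}}
	heap.Init(q)
	for q.Len() > 0 {
		fmt.Println(heap.Pop(q).(podInfo).name) // high, mid, low
	}
}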
src/runtime/pprof/pprof_test.go
	if err != nil {
		t.Fatal(err)
	}
	heap := 1 << 30
	if runtime.GOOS == "android" {
		// Use smaller size for Android to avoid crash.
		heap = 100 << 20
	}
	if runtime.GOOS == "windows" && runtime.GOARCH == "arm" {
		// Use smaller heap for Windows/ARM to avoid crash.
		heap = 100 << 20
	}
	if testing.Short() {
		heap = 100 << 20
	}
	// This makes fork slower.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 18:42:28 UTC 2024 - 68.8K bytes - Viewed (0)