Results 21 - 30 of 553 for mheap (0.15 sec)
- src/runtime/mcache.go
    }

    // dummy mspan that contains no free objects.
    var emptymspan mspan

    func allocmcache() *mcache {
        var c *mcache
        systemstack(func() {
            lock(&mheap_.lock)
            c = (*mcache)(mheap_.cachealloc.alloc())
            c.flushGen.Store(mheap_.sweepgen)
            unlock(&mheap_.lock)
        })
        for i := range c.alloc {
            c.alloc[i] = &emptymspan
        }
        c.nextSample = nextSample()
        return c
    }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 10K bytes - Viewed (0)
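A detail worth noting in the allocmcache snippet above: every size-class slot of a new mcache is pointed at a shared emptymspan sentinel rather than left nil, so the allocation fast path never needs a nil check. A minimal standalone sketch of that sentinel-initialization pattern, with illustrative names rather than the runtime's (mspan, mheap_, and the fixalloc allocator are simplified away):

    package main

    import (
        "fmt"
        "sync"
    )

    // span is a stand-in for the runtime's mspan: a bucket of free slots.
    type span struct{ free int }

    // emptySpan is a shared sentinel with no free slots, like emptymspan.
    var emptySpan span

    type cache struct {
        alloc [8]*span // one slot per size class, never nil
    }

    var mu sync.Mutex // stand-in for mheap_.lock

    func newCache() *cache {
        mu.Lock()
        c := new(cache) // the runtime allocates from a locked fixed-size allocator
        mu.Unlock()
        // Point every class at the sentinel so lookups never nil-check:
        // an "empty" span simply reports no free objects.
        for i := range c.alloc {
            c.alloc[i] = &emptySpan
        }
        return c
    }

    func main() {
        c := newCache()
        fmt.Println(c.alloc[3].free == 0) // true: usable immediately, no nil check
    }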
- src/runtime/mgcpacer.go
    // is based on a steady-state scannable heap size, we assume this means our
    // heap is growing. Compute a new heap goal that takes our existing runway
    // computed for scanWorkExpected and extrapolates it to maxScanWork, the worst-case
    // scan work. This keeps our assist ratio stable if the heap continues to grow.
    //
    // The effect of this mechanism is that assists stay flat in the face of heap
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 55.4K bytes - Viewed (0)
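The mgcpacer.go comment above describes a proportionality: the runway (heap goal minus the heap size when GC triggered) was sized for scanWorkExpected, so scaling it by maxScanWork/scanWorkExpected yields a new goal with the same assist ratio, i.e. the same scan work per byte of runway. A hedged numeric sketch of that arithmetic, with illustrative values and names rather than the pacer's actual fields:

    package main

    import "fmt"

    func main() {
        const (
            triggered        = 80 << 20  // heap size when GC started (illustrative)
            heapGoal         = 100 << 20 // goal computed for the steady state
            scanWorkExpected = 10 << 20  // scan work the runway was sized for
            maxScanWork      = 15 << 20  // worst case if the heap is growing
        )
        // Assist ratio = scan work per byte of runway.
        runway := float64(heapGoal - triggered)
        ratio := scanWorkExpected / runway

        // Extrapolate: grow the runway in proportion to the extra scan
        // work, so the assist ratio stays the same.
        extGoal := triggered + int(runway*maxScanWork/scanWorkExpected)
        extRatio := maxScanWork / float64(extGoal-triggered)

        // Prints ratio=0.500 extGoal=110MiB extRatio=0.500:
        // assists stay flat even though the goal moved out.
        fmt.Printf("ratio=%.3f extGoal=%dMiB extRatio=%.3f\n",
            ratio, extGoal>>20, extRatio)
    }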
- src/internal/trace/gc.go
                    // Keep the first window.
                    goto keep
                } else {
                    // Replace it with this window.
                    heap.Remove(&acc.wHeap, i)
                    break
                }
            }
        }
        heap.Push(&acc.wHeap, UtilWindow{time, mu})
        if len(acc.wHeap) > acc.nWorst {
            heap.Pop(&acc.wHeap)
        }
    keep:
    }
    if len(acc.wHeap) < acc.nWorst {
        // We don't have N windows yet, so keep accumulating.
        acc.bound = 1.0
    } else {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 26K bytes - Viewed (0)
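The gc.go snippet above is the classic bounded-heap pattern: push each candidate window, and whenever the heap exceeds nWorst, pop once so only the N worst windows survive. A self-contained sketch of the same pattern using container/heap, here keeping the N lowest utilization values by evicting from a max-ordered heap (names and values are illustrative):

    package main

    import (
        "container/heap"
        "fmt"
    )

    // maxHeap orders largest-first, so Pop evicts the largest survivor
    // and the N smallest values remain in the heap.
    type maxHeap []float64

    func (h maxHeap) Len() int           { return len(h) }
    func (h maxHeap) Less(i, j int) bool { return h[i] > h[j] }
    func (h maxHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
    func (h *maxHeap) Push(x any)        { *h = append(*h, x.(float64)) }
    func (h *maxHeap) Pop() any {
        old := *h
        x := old[len(old)-1]
        *h = old[:len(old)-1]
        return x
    }

    func main() {
        const nWorst = 3
        var h maxHeap
        for _, mu := range []float64{0.9, 0.2, 0.7, 0.1, 0.5, 0.8} {
            heap.Push(&h, mu)
            if h.Len() > nWorst {
                heap.Pop(&h) // evict the best; keep the nWorst lowest
            }
        }
        fmt.Println(h) // the three lowest utilizations, heap-ordered
    }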
- src/internal/trace/testdata/tests/go122-gc-stress.test
data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/asm_amd64.s" String id=66 data="runtime.traceLocker.GCMarkAssistStart" String id=67 data="runtime.(*mheap).alloc" String id=68 data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mheap.go" String id=69 data="runtime.(*mcache).allocLarge" String id=70 data="runtime.chanrecv1" String id=71
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 139.1K bytes - Viewed (0)
- staging/src/k8s.io/apimachinery/pkg/util/cache/expiring.go
    }

    func (c *Expiring) gc(now time.Time) {
        for {
            // Return from gc if the heap is empty or the next element is not yet
            // expired.
            //
            // heap[0] is a peek at the next element in the heap, which is not obvious
            // from looking at the (*expiringHeap).Pop() implementation below.
            // heap.Pop() swaps the first entry with the last entry of the heap, then
            // calls (*expiringHeap).Pop() which returns the last element.
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon May 22 15:51:23 UTC 2023 - 5.6K bytes - Viewed (0)
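The expiring.go comment above flags a genuinely non-obvious container/heap contract: h[0] is the peek, while heap.Pop first swaps the root to the end and then calls your type's Pop method, which only has to remove and return the last element. A minimal sketch demonstrating that contract (the intHeap type is illustrative):

    package main

    import (
        "container/heap"
        "fmt"
    )

    type intHeap []int

    func (h intHeap) Len() int           { return len(h) }
    func (h intHeap) Less(i, j int) bool { return h[i] < h[j] }
    func (h intHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
    func (h *intHeap) Push(x any)        { *h = append(*h, x.(int)) }

    // Pop is invoked by heap.Pop *after* the root has been swapped to
    // the end, so it only returns and shrinks off the last element.
    func (h *intHeap) Pop() any {
        old := *h
        x := old[len(old)-1]
        *h = old[:len(old)-1]
        return x
    }

    func main() {
        h := &intHeap{5, 2, 9}
        heap.Init(h)
        fmt.Println((*h)[0])     // 2: peek at the next element, like heap[0] above
        fmt.Println(heap.Pop(h)) // 2: the same element, now removed
    }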
- src/runtime/mgcwork.go
    // obj must point to the beginning of a heap object or an oblet.
    //
    //go:nowritebarrierrec
    func (w *gcWork) put(obj uintptr) {
        flushed := false
        wbuf := w.wbuf1
        // Record that this may acquire the wbufSpans or heap lock to
        // allocate a workbuf.
        lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
        lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
        if wbuf == nil {
            w.init()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 12.9K bytes - Viewed (0)
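lockWithRankMayAcquire in the mgcwork.go snippet declares that a lock might be taken on this path, so the runtime's lock-rank checker can catch ordering violations even on executions that never actually allocate a workbuf. The real checker lives in the runtime's lockrank machinery; the toy below only illustrates the idea of rank-ordered acquisition, and every name and rank value in it is made up:

    package main

    import "sync"

    // rank orders locks; acquiring only in strictly increasing rank
    // order rules out cycles, and hence deadlocks.
    type rank int

    const (
        rankWbufSpans rank = 10 // illustrative values
        rankMheap     rank = 20
    )

    type rankedMutex struct {
        mu sync.Mutex
        r  rank
    }

    // held tracks ranks acquired by the current goroutine (a
    // single-goroutine toy; the runtime tracks this per-M).
    var held []rank

    // mayAcquire declares intent: even if no lock is taken below,
    // the ordering is checked now.
    func mayAcquire(r rank) {
        if len(held) > 0 && held[len(held)-1] >= r {
            panic("lock ordering violation")
        }
    }

    func (m *rankedMutex) lock() {
        mayAcquire(m.r)
        m.mu.Lock()
        held = append(held, m.r)
    }

    func (m *rankedMutex) unlock() {
        held = held[:len(held)-1]
        m.mu.Unlock()
    }

    func main() {
        spans := &rankedMutex{r: rankWbufSpans}
        heapL := &rankedMutex{r: rankMheap}
        mayAcquire(rankWbufSpans) // fine: nothing held yet
        spans.lock()
        mayAcquire(rankMheap) // fine: mheap ranks above wbufSpans
        heapL.lock()
        heapL.unlock()
        spans.unlock()
    }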
- src/runtime/traceallocfree.go
throw("traceSnapshotMemory: tracing is not enabled") } // Write out all the heap spans and heap objects. for _, s := range mheap_.allspans { if s.state.get() == mSpanDead { continue } // It's some kind of span, so trace that it exists. trace.SpanExists(s) // Write out allocated objects if it's a heap span. if s.state.get() != mSpanInUse { continue } // Find all allocated objects.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 20:32:51 UTC 2024 - 5.9K bytes - Viewed (0)
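The traceallocfree.go loop above is a two-level filter: every non-dead span is recorded as existing, but per-object detail is emitted only for spans actually in use. A compact sketch of that shape, with an illustrative span-state enum and fmt.Println standing in for the trace writer:

    package main

    import "fmt"

    type spanState int

    const (
        spanDead spanState = iota
        spanFree
        spanInUse
    )

    type span struct {
        id    int
        state spanState
    }

    func snapshot(spans []span) {
        for _, s := range spans {
            if s.state == spanDead {
                continue // dead spans aren't traced at all
            }
            fmt.Println("SpanExists", s.id) // every live span is recorded
            if s.state != spanInUse {
                continue // only in-use spans carry heap objects
            }
            fmt.Println("  objects of span", s.id) // per-object detail
        }
    }

    func main() {
        snapshot([]span{{1, spanDead}, {2, spanFree}, {3, spanInUse}})
    }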
- src/cmd/link/internal/ld/outbuf.go
    // The heap variables aren't protected by a mutex. For now, just bomb if you
    // try to use OutBuf in parallel. (Note this probably could be fixed.)
    if out.isView {
        panic("cannot write to heap in parallel")
    }

    // See if our heap would grow to be too large, and if so, copy it to the end
    // of the mmapped area.
    if heapLen > maxOutBufHeapLen && out.copyHeap() {
        heapPos -= heapLen
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 17 19:51:29 UTC 2022 - 8.1K bytes - Viewed (0)
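Per the outbuf.go snippet, the linker writes through an mmapped file region and spills overflow into a plain heap slice, copying the heap out when it grows past maxOutBufHeapLen. A simplified sketch of that spill-and-copy scheme, using an ordinary byte slice in place of the mmap and an illustrative threshold:

    package main

    import "fmt"

    const maxHeapLen = 16 // illustrative spill threshold

    type outBuf struct {
        mapped []byte // stands in for the mmapped file region
        heap   []byte // overflow past the end of the mapping
    }

    func (o *outBuf) write(p []byte) {
        o.heap = append(o.heap, p...)
        // If the heap outgrew the threshold, move it to the end of the
        // "mapped" area and reset it, like copyHeap in outbuf.go.
        if len(o.heap) > maxHeapLen {
            o.mapped = append(o.mapped, o.heap...)
            o.heap = o.heap[:0]
        }
    }

    func main() {
        var o outBuf
        for i := 0; i < 5; i++ {
            o.write([]byte("hello, "))
        }
        fmt.Println(len(o.mapped), len(o.heap)) // bytes spilled vs. still on heap
    }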
- src/runtime/heapdump.go
    } else {
        dumpbool(true) // big-endian ptrs
    }
    dumpint(goarch.PtrSize)
    var arenaStart, arenaEnd uintptr
    for i1 := range mheap_.arenas {
        if mheap_.arenas[i1] == nil {
            continue
        }
        for i, ha := range mheap_.arenas[i1] {
            if ha == nil {
                continue
            }
            base := arenaBase(arenaIdx(i1)<<arenaL1Shift | arenaIdx(i))
            if arenaStart == 0 || base < arenaStart {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 09 04:07:57 UTC 2024 - 17.6K bytes - Viewed (0)
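The arena walk in heapdump.go uses a two-level index: the L1 index is shifted up by arenaL1Shift and OR-ed with the L2 index to form a single arenaIdx, from which a base address is derived. A small sketch of that packing arithmetic, with an illustrative shift and arena size rather than the runtime's platform-dependent values:

    package main

    import "fmt"

    const (
        l1Shift   = 6       // illustrative; arenaL1Shift varies by platform
        arenaSize = 4 << 20 // illustrative arena size
    )

    type arenaIdx uint

    // makeIdx packs a two-level (L1, L2) arena coordinate into one index.
    func makeIdx(l1, l2 uint) arenaIdx { return arenaIdx(l1<<l1Shift | l2) }

    func (i arenaIdx) l1() uint { return uint(i) >> l1Shift }
    func (i arenaIdx) l2() uint { return uint(i) & (1<<l1Shift - 1) }

    // base maps an arena index to the start address of that arena.
    func base(i arenaIdx) uintptr { return uintptr(i) * arenaSize }

    func main() {
        idx := makeIdx(3, 17)
        fmt.Println(idx.l1(), idx.l2(), base(idx)) // 3, 17, and that arena's base
    }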
- src/runtime/mbarrier.go
    // the ptr object regardless of the slot's color.
    //
    // Another place where we intentionally omit memory barriers is when
    // accessing mheap_.arena_used to check if a pointer points into the
    // heap. On relaxed memory machines, it's possible for a mutator to
    // extend the size of the heap by updating arena_used, allocate an
    // object from this new region, and publish a pointer to that object,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 15.7K bytes - Viewed (0)
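The mbarrier.go comment describes a publication race: on a weakly ordered machine, another core could observe the published pointer before it observes the arena_used update (or the object initialization) that must precede it. In ordinary Go code, the analogous fix is to publish through an atomic, whose store/load ordering under Go's memory model guarantees that a reader who sees the pointer also sees the initialized object. A hedged sketch:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type object struct{ value int }

    // published is the shared slot. The atomic store synchronizes with
    // the atomic load, so a reader that observes the pointer also
    // observes the fully initialized object behind it.
    var published atomic.Pointer[object]

    func writer() {
        obj := &object{value: 42} // initialize first...
        published.Store(obj)      // ...then publish
    }

    func reader() {
        // The reader may see nil (the writer hasn't run yet), but never
        // a non-nil pointer to an uninitialized object.
        if obj := published.Load(); obj != nil {
            fmt.Println(obj.value) // always 42, never garbage
        }
    }

    func main() {
        go writer()
        reader()
    }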