Results 21 - 30 of 461 for mheap (0.04 sec)
src/internal/trace/testdata/tests/go122-gc-stress.test
data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/asm_amd64.s" String id=66 data="runtime.traceLocker.GCMarkAssistStart" String id=67 data="runtime.(*mheap).alloc" String id=68 data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mheap.go" String id=69 data="runtime.(*mcache).allocLarge" String id=70 data="runtime.chanrecv1" String id=71
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 139.1K bytes - Viewed (0)
staging/src/k8s.io/apimachinery/pkg/util/cache/expiring.go
}

func (c *Expiring) gc(now time.Time) {
    for {
        // Return from gc if the heap is empty or the next element is not yet
        // expired.
        //
        // heap[0] is a peek at the next element in the heap, which is not obvious
        // from looking at the (*expiringHeap).Pop() implementation below.
        // heap.Pop() swaps the first entry with the last entry of the heap, then
        // calls (*expiringHeap).Pop() which returns the last element.
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon May 22 15:51:23 UTC 2023 - 5.6K bytes - Viewed (0)
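The comment in this excerpt hinges on how container/heap's package-level Pop interacts with the container's own Pop method: heap.Pop swaps heap[0] with the last element, shrinks the heap, then hands back that last element. A minimal standalone sketch of that behavior, using a toy int heap rather than the expiringHeap from the Kubernetes code:

package main

import (
    "container/heap"
    "fmt"
)

// intHeap is a toy min-heap of ints implementing heap.Interface.
type intHeap []int

func (h intHeap) Len() int           { return len(h) }
func (h intHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h intHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

// Push appends to the tail of the slice.
func (h *intHeap) Push(x any) { *h = append(*h, x.(int)) }

// Pop removes and returns the tail element. By the time this runs,
// heap.Pop has already swapped the smallest element (h[0]) into the
// tail position, which is the non-obvious detail the comment above
// calls out.
func (h *intHeap) Pop() any {
    old := *h
    n := len(old)
    x := old[n-1]
    *h = old[:n-1]
    return x
}

func main() {
    h := &intHeap{5, 2, 9}
    heap.Init(h)
    fmt.Println((*h)[0])     // 2: a peek at the next element, like heap[0] in gc
    fmt.Println(heap.Pop(h)) // 2: the element that was just at h[0]
}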
src/runtime/mgcwork.go
// obj must point to the beginning of a heap object or an oblet.
//
//go:nowritebarrierrec
func (w *gcWork) put(obj uintptr) {
    flushed := false
    wbuf := w.wbuf1
    // Record that this may acquire the wbufSpans or heap lock to
    // allocate a workbuf.
    lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
    lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
    if wbuf == nil {
        w.init()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 12.9K bytes - Viewed (0)
src/runtime/traceallocfree.go
throw("traceSnapshotMemory: tracing is not enabled") } // Write out all the heap spans and heap objects. for _, s := range mheap_.allspans { if s.state.get() == mSpanDead { continue } // It's some kind of span, so trace that it exists. trace.SpanExists(s) // Write out allocated objects if it's a heap span. if s.state.get() != mSpanInUse { continue } // Find all allocated objects.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 20:32:51 UTC 2024 - 5.9K bytes - Viewed (0)
src/runtime/heapdump.go
    } else {
        dumpbool(true) // big-endian ptrs
    }
    dumpint(goarch.PtrSize)
    var arenaStart, arenaEnd uintptr
    for i1 := range mheap_.arenas {
        if mheap_.arenas[i1] == nil {
            continue
        }
        for i, ha := range mheap_.arenas[i1] {
            if ha == nil {
                continue
            }
            base := arenaBase(arenaIdx(i1)<<arenaL1Shift | arenaIdx(i))
            if arenaStart == 0 || base < arenaStart {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 09 04:07:57 UTC 2024 - 17.6K bytes - Viewed (0)
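The nested loop in this excerpt walks the runtime's two-level arena map and rebuilds a combined arena index with arenaIdx(i1)<<arenaL1Shift | arenaIdx(i): the level-1 index goes in the high bits, the level-2 index in the low bits. A toy sketch of that packing, with a made-up shift constant standing in for arenaL1Shift:

package main

import "fmt"

// l1Shift is hypothetical: it gives 16 level-2 entries per level-1 entry,
// not the runtime's real arenaL1Shift value.
const l1Shift = 4

// combine packs a level-1 index into the high bits and a level-2 index
// into the low bits, mirroring the shape of the arenaIdx expression above.
func combine(i1, i2 uint) uint { return i1<<l1Shift | i2 }

// split recovers both indexes from a combined value.
func split(idx uint) (i1, i2 uint) { return idx >> l1Shift, idx & (1<<l1Shift - 1) }

func main() {
    idx := combine(3, 5)
    i1, i2 := split(idx)
    fmt.Println(idx, i1, i2) // 53 3 5
}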
src/runtime/mbarrier.go
// the ptr object regardless of the slot's color.
//
// Another place where we intentionally omit memory barriers is when
// accessing mheap_.arena_used to check if a pointer points into the
// heap. On relaxed memory machines, it's possible for a mutator to
// extend the size of the heap by updating arena_used, allocate an
// object from this new region, and publish a pointer to that object,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 15.7K bytes - Viewed (0)
src/runtime/mgcmark.go
    //
    // We're going to scan the whole heap (that was available at the time the
    // mark phase started, i.e. markArenas) for in-use spans which have specials.
    //
    // Break up the work into arenas, and further into chunks.
    //
    // Snapshot allArenas as markArenas. This snapshot is safe because allArenas
    // is append-only.
    mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 18 21:25:11 UTC 2024 - 52.5K bytes - Viewed (0)
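The snapshot in this excerpt uses a full (three-index) slice expression, s[:len(s):len(s)], which caps the snapshot's capacity at its current length. A small self-contained sketch of why that lets the snapshot be held safely while the original, append-only slice keeps growing:

package main

import "fmt"

func main() {
    // An append-only slice with spare capacity, standing in for allArenas.
    all := make([]int, 3, 8)
    all[0], all[1], all[2] = 1, 2, 3

    // Full slice expression: len 3, cap 3. With len == cap, any append
    // through the snapshot must allocate a new backing array, so it can
    // never clobber elements appended to the original later on.
    snapshot := all[:len(all):len(all)]

    all = append(all, 4)           // grows in place in the spare capacity
    snapshot = append(snapshot, 9) // allocates; does not touch all's array

    fmt.Println(all)      // [1 2 3 4]
    fmt.Println(snapshot) // [1 2 3 9]
}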
src/runtime/export_test.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:50:53 UTC 2024 - 46.1K bytes - Viewed (0)
src/runtime/stack.go
        s = stackLarge.free[log2npage].first
        stackLarge.free[log2npage].remove(s)
    }
    unlock(&stackLarge.lock)

    lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

    if s == nil {
        // Allocate a new stack from the heap.
        s = mheap_.allocManual(npage, spanAllocStack)
        if s == nil {
            throw("out of memory")
        }
        osStackAlloc(s)
        s.elemsize = uintptr(n)
    }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 41.1K bytes - Viewed (0)
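This excerpt follows a familiar allocator shape: try to reuse an entry from a size-classed free list, and only fall back to a fresh allocation when the list is empty. A generic sketch of that pattern with invented names (freeLists, getBuf), not the runtime's actual types:

package main

import "fmt"

// freeLists caches reusable buffers keyed by power-of-two size class.
var freeLists = map[int][][]byte{}

// getBuf pops a cached buffer for the class if one exists; otherwise it
// falls back to allocating a new one, mirroring the "check stackLarge.free,
// else mheap_.allocManual" shape in the excerpt above.
func getBuf(class int) []byte {
    if bufs := freeLists[class]; len(bufs) > 0 {
        b := bufs[len(bufs)-1]
        freeLists[class] = bufs[:len(bufs)-1]
        return b
    }
    return make([]byte, 1<<class)
}

// putBuf returns a buffer to its class's free list for later reuse.
func putBuf(class int, b []byte) {
    freeLists[class] = append(freeLists[class], b)
}

func main() {
    b := getBuf(4) // nothing cached yet, so this allocates 16 bytes
    putBuf(4, b)
    fmt.Println(len(getBuf(4))) // 16: reused from the free list
}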
src/runtime/mbitmap.go
    }
    getg().m.traceback = 2
    throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}

// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
//
// If p is an invalid heap pointer and debug.invalidptr != 0,
// findObject panics.
//
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 00:18:55 UTC 2024 - 60K bytes - Viewed (0)