- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 223 for heapUp (0.15 sec)
-
src/internal/trace/batchcursor.go
heapSiftDown(heap, i) } func heapRemove(heap []*batchCursor, i int) []*batchCursor { // Sift index i up to the root, ignoring actual values. for i > 0 { heap[(i-1)/2], heap[i] = heap[i], heap[(i-1)/2] i = (i - 1) / 2 } // Swap the root with the last element, then remove it. heap[0], heap[len(heap)-1] = heap[len(heap)-1], heap[0] heap = heap[:len(heap)-1] // Sift the root down. heapSiftDown(heap, 0)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 4.1K bytes - Viewed (0) -
src/runtime/debuglog.go
throw("failed to allocate debug log") } l.w.r.data = &l.w.data l.owned.Store(1) // Prepend to allDloggers list. headp := (*uintptr)(unsafe.Pointer(&allDloggers)) for { head := atomic.Loaduintptr(headp) l.allLink = (*dlogger)(unsafe.Pointer(head)) if atomic.Casuintptr(headp, head, uintptr(unsafe.Pointer(l))) { break } } } // If the time delta is getting too high, write a new sync
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 02 15:10:48 UTC 2024 - 18.3K bytes - Viewed (0) -
testing/internal-performance-testing/src/templates/root-project/pom.xml
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu Apr 04 07:21:38 UTC 2024 - 3.1K bytes - Viewed (0) -
src/runtime/mgcscavenge.go
// the heap goal is defined in terms of bytes of objects, rather than pages like // RSS. As a result, we need to take into account for fragmentation internal to // spans. heapGoal / lastHeapGoal defines the ratio between the current heap goal // and the last heap goal, which tells us by how much the heap is growing and // shrinking. We estimate what the heap will grow to in terms of pages by taking
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes - Viewed (0) -
src/cmd/link/internal/ld/outbuf_mmap.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 19 11:20:31 UTC 2024 - 1.4K bytes - Viewed (0) -
src/runtime/traceexp.go
// Experimental heap span events. IDs map reversibly to base addresses. traceEvSpan // heap span exists [timestamp, id, npages, type/class] traceEvSpanAlloc // heap span alloc [timestamp, id, npages, type/class] traceEvSpanFree // heap span free [timestamp, id] // Experimental heap object events. IDs map reversibly to addresses. traceEvHeapObject // heap object exists [timestamp, id, type]
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:47:01 UTC 2024 - 2.4K bytes - Viewed (0) -
src/cmd/link/internal/ld/outbuf_nommap.go
package ld // Mmap allocates an in-heap output buffer with the given size. It copies // any old data (if any) to the new buffer. func (out *OutBuf) Mmap(filesize uint64) error { // We need space to put all the symbols before we apply relocations. oldheap := out.heap if filesize < uint64(len(oldheap)) { panic("mmap size too small") } out.heap = make([]byte, filesize) copy(out.heap, oldheap) return nil }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 19 11:20:31 UTC 2024 - 660 bytes - Viewed (0) -
src/runtime/mstats.go
// Mallocs is the cumulative count of heap objects allocated. // The number of live objects is Mallocs - Frees. Mallocs uint64 // Frees is the cumulative count of heap objects freed. Frees uint64 // Heap memory statistics. // // Interpreting the heap statistics requires some knowledge of // how Go organizes memory. Go divides the virtual address // space of the heap into "spans", which are contiguous
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 08 21:03:13 UTC 2024 - 34.2K bytes - Viewed (0) -
src/internal/abi/escape.go
import "unsafe" // NoEscape hides the pointer p from escape analysis, preventing it // from escaping to the heap. It compiles down to nothing. // // WARNING: This is very subtle to use correctly. The caller must // ensure that it's truly safe for p to not escape to the heap by // maintaining runtime pointer invariants (for example, that globals // and the heap may not generally point into a stack). // //go:nosplit //go:nocheckptr
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 18 21:25:11 UTC 2024 - 884 bytes - Viewed (0) -
guava/src/com/google/common/collect/MinMaxPriorityQueue.java
Heap heap; if (crossOver == index) { heap = this; } else { index = crossOver; heap = otherHeap; } heap.bubbleUpAlternatingLevels(index, x); } /** * Bubbles a value from {@code index} up the levels of this heap, and returns the index the * element ended up at. */ @CanIgnoreReturnValue
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Thu Feb 22 21:19:52 UTC 2024 - 34K bytes - Viewed (1)