Results 41 - 50 of 518 for heal (0.05 sec)

  1. src/internal/trace/reader.go

    	// (3) Group each event batch by M, sorted by timestamp. (batchCursor contains the groups.)
    	// (4) Organize batchCursors in a min-heap, ordered by the timestamp of the next event for each M.
    	// (5) Try to advance the next event for the M at the top of the min-heap.
    	//   (a) On success, select that M.
    	//   (b) On failure, sort the min-heap and try to advance other Ms. Select the first M that advances.
    	//   (c) If there's nothing left to advance, goto (1).
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 18:48:18 UTC 2024
    - 6.7K bytes
    - Viewed (0)
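
    A minimal sketch of step (4) above, assuming a simplified stand-in for batchCursor:
    per-M cursors are kept in a min-heap keyed by the timestamp of each cursor's next
    event, so the M with the earliest pending event is always at the root. The cursor
    type and its fields are illustrative, not the actual src/internal/trace types.

    package main

    import (
    	"container/heap"
    	"fmt"
    )

    // cursor is a hypothetical stand-in for batchCursor.
    type cursor struct {
    	m      int64 // which M this cursor tracks
    	nextTs int64 // timestamp of this M's next undelivered event
    }

    type cursorHeap []*cursor

    func (h cursorHeap) Len() int           { return len(h) }
    func (h cursorHeap) Less(i, j int) bool { return h[i].nextTs < h[j].nextTs }
    func (h cursorHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
    func (h *cursorHeap) Push(x any)        { *h = append(*h, x.(*cursor)) }
    func (h *cursorHeap) Pop() any {
    	old := *h
    	c := old[len(old)-1]
    	*h = old[:len(old)-1]
    	return c
    }

    func main() {
    	h := &cursorHeap{{m: 1, nextTs: 30}, {m: 2, nextTs: 10}, {m: 3, nextTs: 20}}
    	heap.Init(h)
    	// The M at the root always has the earliest next event (step 5).
    	for h.Len() > 0 {
    		c := heap.Pop(h).(*cursor)
    		fmt.Printf("advance M%d at t=%d\n", c.m, c.nextTs)
    	}
    }
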
  2. src/runtime/mgcscavenge.go

    // the heap goal is defined in terms of bytes of objects, rather than pages like
    // RSS. As a result, we need to account for fragmentation internal to
    // spans. heapGoal / lastHeapGoal defines the ratio between the current heap goal
    // and the last heap goal, which tells us by how much the heap is growing and
    // shrinking. We estimate what the heap will grow to in terms of pages by taking
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:48:45 UTC 2024
    - 52.3K bytes
    - Viewed (0)
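
    A back-of-the-envelope illustration of the ratio described above: scaling the
    current footprint in pages by heapGoal / lastHeapGoal to estimate where the heap
    is headed. All numbers are made up; the real scavenger logic in mgcscavenge.go is
    considerably more involved.

    package main

    import "fmt"

    func main() {
    	const pageSize = 8192 // bytes per runtime page on most platforms

    	lastHeapGoal := uint64(512 << 20) // previous cycle's goal: 512 MiB
    	heapGoal := uint64(640 << 20)     // current goal: 640 MiB

    	// heapGoal / lastHeapGoal tells us by how much the heap is growing.
    	ratio := float64(heapGoal) / float64(lastHeapGoal)

    	// Scale the in-use page count (which includes fragmentation internal
    	// to spans) by the same ratio to estimate growth in pages, not bytes.
    	inUsePages := uint64(70000)
    	estPages := uint64(float64(inUsePages) * ratio)

    	fmt.Printf("growth ratio %.2f -> est. %d pages (%.1f MiB)\n",
    		ratio, estPages, float64(estPages*pageSize)/(1<<20))
    }
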
  3. src/runtime/mgcmark.go

    			// to a heap-allocated defer record. Keep that heap record live.
    			scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
    		}
    		// Retain defer records themselves.
    		// Defer records might not be reachable from the G through regular heap
    		// tracing because the defer linked list might weave between the stack and the heap.
    		if d.heap {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Apr 18 21:25:11 UTC 2024
    - 52.5K bytes
    - Viewed (0)
  4. src/runtime/sema.go

    			// It would take O(N) time to calculate how long each goroutine
    			// has been waiting, so instead we charge avg(head-wait, tail-wait)*N.
    			// head-wait is the longest wait and tail-wait is the shortest.
    			// (When we do a lifo insertion, we preserve this property by
    			// copying the old head's acquiretime into the inserted new head.
    			// In that case the overall average may be slightly high, but that's fine:
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 19K bytes
    - Viewed (0)
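
    A sketch of the O(1) approximation described above: rather than summing every
    waiter's wait time (O(N)), charge avg(head-wait, tail-wait) * N, since head-wait
    is the longest wait and tail-wait the shortest. The variable names here are
    illustrative, not the runtime's.

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	now := time.Now().UnixNano()
    	headAcquire := now - int64(500*time.Millisecond) // waiting longest
    	tailAcquire := now - int64(100*time.Millisecond) // waiting shortest
    	n := int64(8)                                    // goroutines queued

    	headWait := now - headAcquire
    	tailWait := now - tailAcquire

    	// Exact accounting would take O(N) time; this estimate is O(1).
    	charged := (headWait + tailWait) / 2 * n
    	fmt.Printf("charged %v of blocking across %d waiters\n",
    		time.Duration(charged), n)
    }
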
  5. src/runtime/panic.go

    // following d0.
    // See the doc comment for deferrangefunc for details.
    func deferconvert(d0 *_defer) {
    	head := d0.head
    	if raceenabled {
    		racereadpc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferconvert))
    	}
    	tail := d0.link
    	d0.rangefunc = false
    
    	var d *_defer
    	for {
    		d = head.Load()
    		if head.CompareAndSwap(d, badDefer()) {
    			break
    		}
    	}
    	if d == nil {
    		return
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 43.8K bytes
    - Viewed (0)
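
    The CAS loop in the snippet above transfers ownership of the whole defer list to
    one caller by swinging the atomic head to a sentinel. A minimal sketch of the
    same pattern, with node and sentinel as illustrative stand-ins for _defer and
    badDefer():

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    type node struct {
    	val  int
    	next *node
    }

    var sentinel = &node{} // plays the role of badDefer()

    // detach atomically claims the entire list; exactly one caller wins.
    func detach(head *atomic.Pointer[node]) *node {
    	for {
    		d := head.Load()
    		if head.CompareAndSwap(d, sentinel) {
    			return d // nil if the list was empty
    		}
    		// Another goroutine changed the head between Load and
    		// CompareAndSwap; retry.
    	}
    }

    func main() {
    	var head atomic.Pointer[node]
    	head.Store(&node{val: 1, next: &node{val: 2}})

    	for d := detach(&head); d != nil && d != sentinel; d = d.next {
    		fmt.Println("claimed node", d.val)
    	}
    }
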
  6. src/runtime/malloc.go

    //
    // A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
    // time, but may take time proportional to the size of the mapped heap beyond that.
    //
    // This function is idempotent.
    //
    // The heap lock must not be held over this operation, since it will briefly acquire
    // the heap lock.
    //
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 59.6K bytes
    - Viewed (0)
  7. src/runtime/arena.go

    // Each chunk must be a multiple of the heap arena size, or the heap arena size must
    // be divisible by the arena chunk size. The address space for each chunk, and each
    // corresponding heapArena for that address space, are eternally reserved for use as
    // arena chunks. That is, they can never be used for the general heap. Each chunk
    // is also represented by a single mspan, and is modeled as a single large heap
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:44:56 UTC 2024
    - 37.9K bytes
    - Viewed (0)
  8. src/runtime/runtime2.go

    	fn        func()  // can be nil for open-coded defers
    	link      *_defer // next defer on G; can point to either heap or stack!
    
    	// If rangefunc is true, *head is the head of the atomic linked list
    	// during a range-over-func execution.
    	head *atomic.Pointer[_defer]
    }
    
    // A _panic holds information about an active panic.
    //
    // A _panic value must only ever live on the stack.
    //
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 47.9K bytes
    - Viewed (0)
  9. src/internal/trace/batchcursor.go

    	heapSiftDown(heap, i)
    }
    
    func heapRemove(heap []*batchCursor, i int) []*batchCursor {
    	// Sift index i up to the root, ignoring actual values.
    	for i > 0 {
    		heap[(i-1)/2], heap[i] = heap[i], heap[(i-1)/2]
    		i = (i - 1) / 2
    	}
    	// Swap the root with the last element, then remove it.
    	heap[0], heap[len(heap)-1] = heap[len(heap)-1], heap[0]
    	heap = heap[:len(heap)-1]
    	// Sift the root down.
    	heapSiftDown(heap, 0)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 18:48:18 UTC 2024
    - 4.1K bytes
    - Viewed (0)
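
    A self-contained version of the removal trick shown above, on a plain int
    min-heap: bubble index i to the root unconditionally (comparisons are skipped on
    purpose), swap the root with the last element, shrink the slice, and sift the new
    root down. This is an illustration, not the batchcursor.go code itself.

    package main

    import "fmt"

    func siftDown(h []int, i int) {
    	for {
    		l, r, min := 2*i+1, 2*i+2, i
    		if l < len(h) && h[l] < h[min] {
    			min = l
    		}
    		if r < len(h) && h[r] < h[min] {
    			min = r
    		}
    		if min == i {
    			return
    		}
    		h[i], h[min] = h[min], h[i]
    		i = min
    	}
    }

    func remove(h []int, i int) []int {
    	// Sift index i up to the root, ignoring actual values.
    	for i > 0 {
    		p := (i - 1) / 2
    		h[p], h[i] = h[i], h[p]
    		i = p
    	}
    	// Swap the root with the last element, then drop it.
    	h[0], h[len(h)-1] = h[len(h)-1], h[0]
    	h = h[:len(h)-1]
    	// Restore the heap property from the root.
    	siftDown(h, 0)
    	return h
    }

    func main() {
    	h := []int{1, 3, 2, 7, 4} // a valid min-heap
    	h = remove(h, 1)          // remove the element 3
    	fmt.Println(h)            // [1 4 2 7]
    }
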
  10. src/runtime/metrics_test.go

    		case "/memory/classes/heap/objects:bytes":
    			objects.totalBytes = samples[i].Value.Uint64()
    		case "/gc/heap/objects:objects":
    			objects.total = samples[i].Value.Uint64()
    		case "/gc/heap/allocs:bytes":
    			objects.allocdBytes = samples[i].Value.Uint64()
    		case "/gc/heap/allocs:objects":
    			objects.allocs = samples[i].Value.Uint64()
    		case "/gc/heap/allocs-by-size:bytes":
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:52:17 UTC 2024
    - 45K bytes
    - Viewed (0)
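
    The metric names in the snippet above come from the runtime/metrics package; a
    minimal sketch of reading them directly (the test file wraps this in more
    machinery):

    package main

    import (
    	"fmt"
    	"runtime/metrics"
    )

    func main() {
    	names := []string{
    		"/memory/classes/heap/objects:bytes",
    		"/gc/heap/objects:objects",
    		"/gc/heap/allocs:bytes",
    		"/gc/heap/allocs:objects",
    	}
    	samples := make([]metrics.Sample, len(names))
    	for i, name := range names {
    		samples[i].Name = name
    	}
    	metrics.Read(samples)
    	for _, s := range samples {
    		if s.Value.Kind() == metrics.KindUint64 {
    			fmt.Printf("%s = %d\n", s.Name, s.Value.Uint64())
    		}
    	}
    }
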