
Results 1 - 10 of 20 for Scavenged (0.43 sec)

  1. src/runtime/mgcscavenge_test.go

    // the ranges described in alloc and scavenged.
    func makePallocData(alloc, scavenged []BitRange) *PallocData {
    	b := new(PallocData)
    	for _, v := range alloc {
    		if v.N == 0 {
    			// Skip N==0. It's harmless and allocRange doesn't
    			// handle this case.
    			continue
    		}
    		b.AllocRange(v.I, v.N)
    	}
    	for _, v := range scavenged {
    		if v.N == 0 {
    			// See the previous loop.
    			continue
    		}
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 25.2K bytes
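
    A quick way to see what this helper does: it marks each non-empty BitRange
    in an alloc bitmap and in a scavenged bitmap. The sketch below reproduces
    that pattern in a self-contained toy, with single-word bitmaps standing in
    for PallocData; every type and name here is an illustrative stand-in, not
    the runtime's exported test API.

    package main

    import "fmt"

    // BitRange describes N bits starting at bit I (assumed shape).
    type BitRange struct {
    	I, N uint
    }

    // pallocData is a toy stand-in: one 64-bit word of alloc bits and one of
    // scavenged bits, rather than the runtime's chunk-sized bitmaps.
    type pallocData struct {
    	alloc, scavenged uint64
    }

    // mask returns n one-bits starting at bit i.
    func mask(i, n uint) uint64 { return ((uint64(1) << n) - 1) << i }

    func (p *pallocData) allocRange(i, n uint)        { p.alloc |= mask(i, n) }
    func (p *pallocData) scavengedSetRange(i, n uint) { p.scavenged |= mask(i, n) }

    // makePallocData mirrors the helper's shape: skip zero-length ranges, then
    // mark each remaining range in the corresponding bitmap.
    func makePallocData(alloc, scavenged []BitRange) *pallocData {
    	p := new(pallocData)
    	for _, v := range alloc {
    		if v.N == 0 {
    			continue
    		}
    		p.allocRange(v.I, v.N)
    	}
    	for _, v := range scavenged {
    		if v.N == 0 {
    			continue
    		}
    		p.scavengedSetRange(v.I, v.N)
    	}
    	return p
    }

    func main() {
    	p := makePallocData([]BitRange{{I: 0, N: 4}}, []BitRange{{I: 8, N: 2}})
    	fmt.Printf("alloc=%08b scavenged=%012b\n", p.alloc, p.scavenged)
    }
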
  2. src/runtime/mgcscavenge.go

    		scavenger.sleep(workTime)
    	}
    }
    
    // scavenge scavenges nbytes worth of free pages, starting with the
    // highest address first. Successive calls continue from where it left
    // off until the heap is exhausted. force makes all memory available to
    // scavenge, ignoring huge page heuristics.
    //
    // Returns the amount of memory scavenged in bytes.
    //
    - Last Modified: Wed May 08 17:48:45 UTC 2024
    - 52.3K bytes
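
    The contract described in that comment (scavenge up to nbytes, report how
    much was actually released, and resume from where the previous call left
    off) suggests a simple caller-side loop. The sketch below is illustrative
    only: scavengeOne is a hypothetical callback, not the runtime's function.

    package main

    import "fmt"

    // releaseAtLeast keeps calling a scavenge-style function until the requested
    // amount has been released or the heap is exhausted (signalled by a zero
    // return), matching the resume-where-you-left-off contract above.
    func releaseAtLeast(target uintptr, scavengeOne func(nbytes uintptr, force bool) uintptr) uintptr {
    	var released uintptr
    	for released < target {
    		r := scavengeOne(target-released, false)
    		if r == 0 {
    			break // nothing left to scavenge this cycle
    		}
    		released += r
    	}
    	return released
    }

    func main() {
    	// Fake backend: pretends 64 KiB of free pages exist and releases at
    	// most 16 KiB per call.
    	remaining := uintptr(64 << 10)
    	fake := func(nbytes uintptr, force bool) uintptr {
    		r := nbytes
    		if r > 16<<10 {
    			r = 16 << 10
    		}
    		if r > remaining {
    			r = remaining
    		}
    		remaining -= r
    		return r
    	}
    	fmt.Println(releaseAtLeast(1<<20, fake)) // prints 65536: stops when the fake heap is exhausted
    }
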
  3. src/runtime/mpallocbits.go

    	// Clear the scavenged bits when we alloc the range.
    	m.pallocBits.allocRange(i, n)
    	m.scavenged.clearRange(i, n)
    }
    
    // allocAll sets every bit in the bitmap to 1 and updates
    // the scavenged bits appropriately.
    func (m *pallocData) allocAll() {
    	// Clear the scavenged bits when we alloc the range.
    	m.pallocBits.allocAll()
    	m.scavenged.clearAll()
    - Last Modified: Sat May 18 15:13:43 UTC 2024
    - 12.5K bytes
  4. src/runtime/export_test.go

    	s.scavenger.wake()
    }
    
    // Stop cleans up the scavenger's resources. The scavenger
    // must be parked for this to work.
    func (s *Scavenger) Stop() {
    	lock(&s.scavenger.lock)
    	parked := s.scavenger.parked
    	unlock(&s.scavenger.lock)
    	if !parked {
    		panic("tried to clean up scavenger that is not parked")
    	}
    	close(s.stop)
    	s.Wake()
    	<-s.done
    }
    
    type ScavengeIndex struct {
    - Last Modified: Thu May 30 17:50:53 UTC 2024
    - 46.1K bytes
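
    Stop's shape here is a general shutdown handshake: require the worker to be
    parked, close a stop channel, wake the worker so it observes the signal, and
    wait on a done channel. Below is a self-contained sketch of that pattern
    with an ordinary goroutine standing in for the scavenger; all names are
    illustrative, not the runtime's.

    package main

    import (
    	"fmt"
    	"sync"
    )

    type worker struct {
    	mu     sync.Mutex
    	parked bool
    	wake   chan struct{} // buffered so a wakeup is never lost
    	stop   chan struct{}
    	done   chan struct{}
    }

    func newWorker() *worker {
    	w := &worker{
    		parked: true, // starts parked, waiting for a wakeup
    		wake:   make(chan struct{}, 1),
    		stop:   make(chan struct{}),
    		done:   make(chan struct{}),
    	}
    	go w.run()
    	return w
    }

    func (w *worker) run() {
    	defer close(w.done)
    	for {
    		<-w.wake // park until woken
    		w.setParked(false)
    		select {
    		case <-w.stop:
    			return // shutdown requested
    		default:
    		}
    		// ... one round of work would go here ...
    		w.setParked(true)
    	}
    }

    func (w *worker) setParked(v bool) { w.mu.Lock(); w.parked = v; w.mu.Unlock() }

    // Wake delivers at most one pending wakeup.
    func (w *worker) Wake() {
    	select {
    	case w.wake <- struct{}{}:
    	default:
    	}
    }

    // Stop mirrors the Scavenger.Stop shape: the worker must be parked, then we
    // signal stop, wake it so it sees the signal, and wait for it to exit.
    func (w *worker) Stop() {
    	w.mu.Lock()
    	parked := w.parked
    	w.mu.Unlock()
    	if !parked {
    		panic("tried to stop a worker that is not parked")
    	}
    	close(w.stop)
    	w.Wake()
    	<-w.done
    }

    func main() {
    	w := newWorker()
    	w.Stop()
    	fmt.Println("worker stopped cleanly")
    }
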
  5. src/runtime/mpagealloc.go

    	inUse addrRanges
    
    	// scav stores the scavenger state.
    	scav struct {
    		// index is an efficient index of chunks that have pages available to
    		// scavenge.
    		index scavengeIndex
    
    		// releasedBg is the amount of memory released in the background this
    		// scavenge cycle.
    		releasedBg atomic.Uintptr
    
    		// releasedEager is the amount of memory released eagerly this scavenge
    		// cycle.
    		releasedEager atomic.Uintptr
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 39.2K bytes
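
    The two counters above split released memory by how it was released (by the
    background scavenger versus eagerly at allocation time). A minimal sketch of
    that bookkeeping, keeping the field names from the snippet but inventing the
    surrounding type and methods:

    package scav

    import "sync/atomic"

    // scavCounters tracks bytes released this scavenge cycle, split between the
    // background scavenger and eager (allocation-time) scavenging.
    type scavCounters struct {
    	releasedBg    atomic.Uintptr
    	releasedEager atomic.Uintptr
    }

    func (c *scavCounters) noteBg(n uintptr)    { c.releasedBg.Add(n) }
    func (c *scavCounters) noteEager(n uintptr) { c.releasedEager.Add(n) }

    // total returns everything released so far this cycle.
    func (c *scavCounters) total() uintptr {
    	return c.releasedBg.Load() + c.releasedEager.Load()
    }
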
  6. src/runtime/mheap.go

    		}
    		scavenge.assistTime.Add(now - start)
    	}
    
    	// Initialize the span.
    	h.initSpan(s, typ, spanclass, base, npages)
    
    	// Commit and account for any scavenged memory that the span now owns.
    	nbytes := npages * pageSize
    	if scav != 0 {
    		// sysUsed all the pages that are actually available
    		// in the span since some of them might be scavenged.
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
  7. src/runtime/mgcpacer.go

    	// only heap object memory. Intuitively, the way we convert from one to the other is to
    	// subtract everything from memoryLimit that both contributes to the memory limit (so,
    	// ignore scavenged memory) and doesn't contain heap objects. This isn't quite what
    	// lines up with reality, but it's a good starting point.
    	//
    	// In practice this computation looks like the following:
    	//
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 55.4K bytes
  8. src/runtime/mgcsweep.go

    		// close to done sweeping.
    
    		// Move the scavenge gen forward (signaling
    		// that there's new work to do) and wake the scavenger.
    		//
    		// The scavenger is signaled by the last sweeper because once
    		// sweeping is done, we will definitely have useful work for
    		// the scavenger to do, since the scavenger only runs over the
    		// heap once per GC cycle. This update is not done during sweep
    - Last Modified: Wed May 08 17:52:18 UTC 2024
    - 32.9K bytes
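
    The scheme described here, where the last sweeper to finish advances the
    scavenge generation and wakes the scavenger, is a "last one out signals"
    pattern. A simplified sketch of that idea, not the runtime's actual
    mechanism:

    package sweep

    import "sync/atomic"

    // coordinator tracks outstanding sweepers. The last one to finish bumps the
    // scavenge generation and delivers a wakeup to the scavenger.
    type coordinator struct {
    	active atomic.Int32  // sweepers currently running
    	gen    atomic.Uint32 // scavenge generation; a bump means new work
    	wake   chan struct{} // buffered wakeup channel
    }

    func newCoordinator() *coordinator {
    	return &coordinator{wake: make(chan struct{}, 1)}
    }

    func (c *coordinator) beginSweep() { c.active.Add(1) }

    // endSweep is called by each sweeper when it finishes. Only the final caller
    // advances the generation and wakes the scavenger.
    func (c *coordinator) endSweep() {
    	if c.active.Add(-1) == 0 {
    		c.gen.Add(1)
    		select {
    		case c.wake <- struct{}{}:
    		default: // the scavenger already has a pending wakeup
    		}
    	}
    }
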
  9. src/runtime/mklockrank.go

    const ranks = `
    # Sysmon
    NONE
    < sysmon
    < scavenge, forcegc;
    
    # Defer
    NONE < defer;
    
    # GC
    NONE <
      sweepWaiters,
      assistQueue,
      sweep;
    
    # Test only
    NONE < testR, testW;
    
    NONE < timerSend;
    
    # Scheduler, timers, netpoll
    NONE < allocmW, execW, cpuprof, pollCache, pollDesc, wakeableSleep;
    scavenge, sweep, testR, wakeableSleep, timerSend < hchan;
    assistQueue,
      cpuprof,
    - Last Modified: Wed May 08 17:47:01 UTC 2024
    - 9.1K bytes
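
    The "<" lines in this file declare a partial order: a lock named on the
    right of "<" ranks above everything on the left and may be acquired while
    those locks are held. A toy illustration of the invariant the generated
    ranks enforce follows; the constants and check are stand-ins, not the
    generated lockrank code.

    package main

    import "fmt"

    type rank int

    // A few ranks in the order implied by the DSL above: sysmon below scavenge
    // and forcegc, and scavenge below hchan.
    const (
    	rankSysmon rank = iota + 1
    	rankScavenge
    	rankForcegc
    	rankHchan
    )

    // mayAcquire reports whether next ranks strictly above every held lock,
    // which is the ordering a lock rank checker asserts on each acquisition.
    func mayAcquire(held []rank, next rank) bool {
    	for _, h := range held {
    		if h >= next {
    			return false
    		}
    	}
    	return true
    }

    func main() {
    	fmt.Println(mayAcquire([]rank{rankSysmon}, rankScavenge)) // true
    	fmt.Println(mayAcquire([]rank{rankHchan}, rankScavenge))  // false: would invert the order
    }
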
  10. src/runtime/mstats.go

    	scavAssistCpu := scavenge.assistTime.Load()
    	scavBgCpu := scavenge.backgroundTime.Load()
    
    	// Update cumulative GC CPU stats.
    	s.GCAssistTime += markAssistCpu
    	s.GCDedicatedTime += markDedicatedCpu + markFractionalCpu
    	s.GCIdleTime += markIdleCpu
    	s.GCTotalTime += markAssistCpu + markDedicatedCpu + markFractionalCpu + markIdleCpu
    
    	// Update cumulative scavenge CPU stats.
    - Last Modified: Mon Apr 08 21:03:13 UTC 2024
    - 34.2K bytes