Results 1 - 10 of 29 for Scavenged (0.21 sec)

  1. src/runtime/mpagecache.go

    			cache: ^chunk.pages64(chunkPageIndex(addr)),
    			scav:  chunk.scavenged.block64(chunkPageIndex(addr)),
    		}
    	}
    
    	// Set the page bits as allocated and clear the scavenged bits, but
    	// be careful to only set and clear the relevant bits.
    	cpi := chunkPageIndex(c.base)
    	chunk.allocPages64(cpi, c.cache)
    	chunk.scavenged.clearBlock64(cpi, c.cache&c.scav /* free and scavenged */)
    
    - Last Modified: Wed Apr 19 14:30:00 UTC 2023
    - 5.6K bytes
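
    A minimal sketch of the bit arithmetic this snippet relies on: the page cache is built from a chunk's free pages (^pages64) plus their scavenged bits, and the chunk then marks those pages allocated while clearing scavenged bits only where a page was both free and scavenged (the cache&scav mask). The cache64/chunk64 types below are simplified stand-ins, not the runtime's structures.

    package main

    import "fmt"

    // cache64 models a 64-page cache pulled from one chunk: free is the bitmap
    // of pages handed to the cache, scav marks which of those were scavenged.
    // The names are illustrative, not the runtime's.
    type cache64 struct {
        free uint64
        scav uint64
    }

    // chunk64 models one 64-page chunk with an allocation bitmap and a
    // scavenged bitmap.
    type chunk64 struct {
        alloc uint64
        scav  uint64
    }

    // allocCache marks the cache's pages as allocated in the chunk and clears
    // the scavenged bits, but only for pages that were both in the cache
    // (free) and scavenged -- the cache&scav intersection from the snippet.
    func (c *chunk64) allocCache(pc cache64) {
        c.alloc |= pc.free
        c.scav &^= pc.free & pc.scav
    }

    func main() {
        ch := chunk64{scav: 0b1111_0000}
        pc := cache64{free: 0b0011_1100, scav: ch.scav & 0b0011_1100}
        ch.allocCache(pc)
        fmt.Printf("alloc=%08b scav=%08b\n", ch.alloc, ch.scav) // alloc=00111100 scav=11000000
    }
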
  2. src/runtime/mgcscavenge_test.go

    // the ranges described in alloc and scavenge.
    func makePallocData(alloc, scavenged []BitRange) *PallocData {
    	b := new(PallocData)
    	for _, v := range alloc {
    		if v.N == 0 {
    			// Skip N==0. It's harmless and allocRange doesn't
    			// handle this case.
    			continue
    		}
    		b.AllocRange(v.I, v.N)
    	}
    	for _, v := range scavenged {
    		if v.N == 0 {
    			// See the previous loop.
    			continue
    		}
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 25.2K bytes
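
    makePallocData builds test state by applying two lists of bit ranges and skipping empty ranges, since the underlying range setter isn't expected to handle N == 0. A rough standalone analogue of that setup pattern; bitRange and the toy bitmap below are assumed shapes, not the test's actual types.

    package main

    import "fmt"

    // bitRange describes n bits starting at bit i, loosely modeled on the
    // BitRange used by the test above.
    type bitRange struct{ i, n uint }

    // bitmap is a toy fixed-size bitmap standing in for PallocData's bitmaps.
    type bitmap [4]uint64 // 256 bits

    func (b *bitmap) setRange(i, n uint) {
        for j := i; j < i+n; j++ {
            b[j/64] |= 1 << (j % 64)
        }
    }

    // makeBitmap mirrors the makePallocData pattern: apply each range, but
    // skip zero-length ranges rather than pass them to the range setter.
    func makeBitmap(ranges []bitRange) *bitmap {
        b := new(bitmap)
        for _, r := range ranges {
            if r.n == 0 {
                // Harmless, but skipped, as in the snippet above.
                continue
            }
            b.setRange(r.i, r.n)
        }
        return b
    }

    func main() {
        b := makeBitmap([]bitRange{{0, 4}, {10, 0}, {64, 2}})
        fmt.Printf("%x %x\n", b[0], b[1]) // f 3
    }
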
  3. src/runtime/mgcscavenge.go

    		scavenger.sleep(workTime)
    	}
    }
    
    // scavenge scavenges nbytes worth of free pages, starting with the
    // highest address first. Successive calls continue from where it left
    // off until the heap is exhausted. force makes all memory available to
    // scavenge, ignoring huge page heuristics.
    //
    // Returns the amount of memory scavenged in bytes.
    //
    - Last Modified: Wed May 08 17:48:45 UTC 2024
    - 52.3K bytes
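
    The doc comment describes a resumable walk: each call releases up to nbytes of free memory starting from the highest addresses, continues where the previous call stopped, and force bypasses the huge-page heuristics. A hedged sketch of that control flow only; the chunk list and bookkeeping below are invented stand-ins for the real allocator state, and force isn't modeled.

    package main

    import "fmt"

    // scavengeState is an illustrative stand-in for the scavenger's state:
    // chunks holds the free bytes per chunk and next is where the previous
    // call left off, with the highest addresses at the end of the slice.
    type scavengeState struct {
        chunks []uintptr
        next   int // next chunk to visit, walking downward
    }

    // scavenge releases up to nbytes of free memory, highest chunks first,
    // resuming from the previous call, and returns the amount released.
    // This sketches the control flow only, not the runtime's algorithm.
    func (s *scavengeState) scavenge(nbytes uintptr) uintptr {
        var released uintptr
        for s.next >= 0 && released < nbytes {
            released += s.chunks[s.next]
            s.chunks[s.next] = 0
            s.next--
        }
        return released
    }

    func main() {
        s := &scavengeState{chunks: []uintptr{4 << 10, 8 << 10, 16 << 10}, next: 2}
        fmt.Println(s.scavenge(20 << 10)) // 24576: releases the two highest chunks
        fmt.Println(s.scavenge(20 << 10)) // 4096: continues where the last call stopped
    }
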
  4. src/runtime/mpallocbits.go

    	// Clear the scavenged bits when we alloc the range.
    	m.pallocBits.allocRange(i, n)
    	m.scavenged.clearRange(i, n)
    }
    
    // allocAll sets every bit in the bitmap to 1 and updates
    // the scavenged bits appropriately.
    func (m *pallocData) allocAll() {
    	// Clear the scavenged bits when we alloc the range.
    	m.pallocBits.allocAll()
    	m.scavenged.clearAll()
    - Last Modified: Sat May 18 15:13:43 UTC 2024
    - 12.5K bytes
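
    allocRange and allocAll keep the two bitmaps consistent: marking pages allocated always clears their scavenged bits, so only free pages can stay marked scavenged. A small sketch of that invariant using plain word/bit arithmetic; the pallocLike type and its methods are illustrative, and the real pallocBits code takes whole-word fast paths this sketch skips.

    package main

    import "fmt"

    const nbits = 512

    // pallocLike pairs an allocation bitmap with a scavenged bitmap, loosely
    // modeled on pallocData. Names and sizes are illustrative.
    type pallocLike struct {
        alloc [nbits / 64]uint64
        scav  [nbits / 64]uint64
    }

    // allocRange marks pages [i, i+n) allocated and clears their scavenged
    // bits, so only free pages remain marked scavenged.
    func (p *pallocLike) allocRange(i, n uint) {
        for j := i; j < i+n; j++ {
            word, bit := j/64, j%64
            p.alloc[word] |= 1 << bit
            p.scav[word] &^= 1 << bit
        }
    }

    // allocAll marks every page allocated and drops all scavenged bits.
    func (p *pallocLike) allocAll() {
        for w := range p.alloc {
            p.alloc[w] = ^uint64(0)
            p.scav[w] = 0
        }
    }

    func main() {
        var p pallocLike
        p.scav[0] = 0xff // pretend the first 8 pages were scavenged while free
        p.allocRange(4, 8)
        fmt.Printf("alloc=%#x scav=%#x\n", p.alloc[0], p.scav[0]) // alloc=0xff0 scav=0xf
    }
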
  5. src/runtime/export_test.go

    	s.scavenger.wake()
    }
    
    // Stop cleans up the scavenger's resources. The scavenger
    // must be parked for this to work.
    func (s *Scavenger) Stop() {
    	lock(&s.scavenger.lock)
    	parked := s.scavenger.parked
    	unlock(&s.scavenger.lock)
    	if !parked {
    		panic("tried to clean up scavenger that is not parked")
    	}
    	close(s.stop)
    	s.Wake()
    	<-s.done
    }
    
    type ScavengeIndex struct {
    - Last Modified: Thu May 30 17:50:53 UTC 2024
    - 46.1K bytes
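
    Stop follows a common shutdown protocol for a parked background goroutine: confirm it is parked under the lock, close a stop channel, wake the worker so it can observe the close, then wait on a done channel. A generic sketch of that pattern with invented names; it is not the test hook's actual API.

    package main

    import (
        "fmt"
        "sync"
    )

    // worker models a background goroutine that parks between wakeups,
    // loosely following the stop/wake/done protocol in the snippet above.
    type worker struct {
        mu     sync.Mutex
        parked bool
        wake   chan struct{}
        stop   chan struct{}
        done   chan struct{}
    }

    func newWorker() *worker {
        w := &worker{parked: true, wake: make(chan struct{}, 1), stop: make(chan struct{}), done: make(chan struct{})}
        go w.run()
        return w
    }

    func (w *worker) run() {
        defer close(w.done)
        for {
            <-w.wake // parked until woken
            select {
            case <-w.stop:
                return // told to shut down
            default:
            }
            // ... do one round of work, then park again ...
        }
    }

    // Wake unparks the worker; the buffered channel makes this non-blocking.
    func (w *worker) Wake() {
        select {
        case w.wake <- struct{}{}:
        default:
        }
    }

    // Stop shuts the worker down: like the snippet above, it insists the
    // worker is parked, closes stop, wakes it, and waits for it to exit.
    func (w *worker) Stop() {
        w.mu.Lock()
        parked := w.parked
        w.mu.Unlock()
        if !parked {
            panic("tried to stop a worker that is not parked")
        }
        close(w.stop)
        w.Wake()
        <-w.done
    }

    func main() {
        w := newWorker()
        w.Stop()
        fmt.Println("worker stopped")
    }
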
  6. src/runtime/mpagealloc.go

    	inUse addrRanges
    
    	// scav stores the scavenger state.
    	scav struct {
    		// index is an efficient index of chunks that have pages available to
    		// scavenge.
    		index scavengeIndex
    
    		// releasedBg is the amount of memory released in the background this
    		// scavenge cycle.
    		releasedBg atomic.Uintptr
    
    		// releasedEager is the amount of memory released eagerly this scavenge
    		// cycle.
    		releasedEager atomic.Uintptr
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 39.2K bytes
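
    The allocator keeps separate per-cycle totals for memory released by the background scavenger and memory released eagerly, both as atomics so they can be updated without holding the heap lock. A small sketch of that accounting split; the type and method names are assumptions, not the allocator's.

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // scavStats mimics the shape of the scav struct in the snippet: two
    // atomically updated counters for the current scavenge cycle.
    type scavStats struct {
        releasedBg    atomic.Uintptr // released by the background scavenger
        releasedEager atomic.Uintptr // released eagerly during allocation
    }

    // total returns everything released so far this cycle.
    func (s *scavStats) total() uintptr {
        return s.releasedBg.Load() + s.releasedEager.Load()
    }

    // reset starts a new cycle.
    func (s *scavStats) reset() {
        s.releasedBg.Store(0)
        s.releasedEager.Store(0)
    }

    func main() {
        var s scavStats
        s.releasedBg.Add(64 << 10)
        s.releasedEager.Add(16 << 10)
        fmt.Println(s.total()) // 81920
        s.reset()
        fmt.Println(s.total()) // 0
    }
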
  7. src/runtime/mpagealloc_test.go

    			t.Errorf("chunk %d nilness mismatch", i)
    		}
    		if !checkPallocBits(t, gb.PallocBits(), wb.PallocBits()) {
    			t.Logf("in chunk %d (mallocBits)", i)
    		}
    		if !checkPallocBits(t, gb.Scavenged(), wb.Scavenged()) {
    			t.Logf("in chunk %d (scavenged)", i)
    		}
    	}
    	// TODO(mknyszek): Verify summaries too?
    }
    
    func TestPageAllocGrow(t *testing.T) {
    	if GOOS == "openbsd" && testing.Short() {
    - Last Modified: Mon Dec 06 19:16:48 UTC 2021
    - 32.6K bytes
  8. src/runtime/testdata/testprog/gc.go

    		allocTotal = 32 << 20
    
    		// The page cache could hide 64 8-KiB pages from the scavenger today.
    		maxPageCache = (8 << 10) * 64
    	)
    
    	// How big the allocations are needs to depend on the page size.
    	// If the page size is too big and the allocations are too small,
    	// they might not be aligned to the physical page size, so the scavenger
    	// will gloss over them.
    	pageSize := os.Getpagesize()
    	var allocChunk int
    - Last Modified: Sun Oct 02 02:28:27 UTC 2022
    - 12.1K bytes
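
    The comment explains the sizing constraint: allocations that don't cover whole physical pages can be glossed over by the scavenger, and the page cache alone can keep up to 64 8-KiB pages (512 KiB) away from it. A hedged sketch of that arithmetic; the chunk-size choice below is one plausible value, not the test's.

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        const (
            allocTotal = 32 << 20 // total bytes the test intends to allocate

            // Upper bound on memory a page cache can hide from the scavenger:
            // 64 pages of 8 KiB each, as in the snippet above.
            maxPageCache = (8 << 10) * 64
        )

        // Pick an allocation size that is a multiple of the physical page
        // size, so each allocation spans whole physical pages and the
        // scavenger can see it. Four pages is one plausible choice.
        pageSize := os.Getpagesize()
        allocChunk := 4 * pageSize
        numAllocs := allocTotal / allocChunk

        fmt.Printf("page size %d B, alloc chunk %d B, %d allocs, cache may hide up to %d B\n",
            pageSize, allocChunk, numAllocs, maxPageCache)
    }
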
  9. src/runtime/mheap.go

    		}
    		scavenge.assistTime.Add(now - start)
    	}
    
    	// Initialize the span.
    	h.initSpan(s, typ, spanclass, base, npages)
    
    	// Commit and account for any scavenged memory that the span now owns.
    	nbytes := npages * pageSize
    	if scav != 0 {
    		// sysUsed all the pages that are actually available
    		// in the span since some of them might be scavenged.
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
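
    Once the span is initialized, the heap computes its size in bytes and, if any of its pages had been scavenged, re-commits them (sysUsed) and accounts for the previously released memory. A rough sketch of that step; sysUsed here is a stand-in that only prints, and the helper is invented, not mheap's code.

    package main

    import "fmt"

    const pageSize = 8 << 10 // assumed 8 KiB runtime page, as elsewhere on this page

    // sysUsed stands in for the runtime's transition of memory back to the
    // "used" state; here it only reports what would be committed.
    func sysUsed(base, n uintptr) {
        fmt.Printf("commit %d bytes at %#x\n", n, base)
    }

    // finishSpan sketches the tail of span setup from the snippet: compute
    // the span's size and, if any pages were scavenged, re-commit the whole
    // span (some of its pages might be scavenged) and report how much had
    // previously been returned to the OS.
    func finishSpan(base, npages, scav uintptr) (committed uintptr) {
        nbytes := npages * pageSize
        if scav != 0 {
            sysUsed(base, nbytes)
            committed = scav
        }
        return committed
    }

    func main() {
        got := finishSpan(0x1000000, 4, 2*pageSize)
        fmt.Println("previously scavenged bytes now in use:", got)
    }
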
  10. src/runtime/malloc_test.go

    	}
    }
    
    func TestScavengedBitsCleared(t *testing.T) {
    	var mismatches [128]BitsMismatch
    	if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok {
    		t.Errorf("uncleared scavenged bits")
    		for _, m := range mismatches[:n] {
    			t.Logf("\t@ address 0x%x", m.Base)
    			t.Logf("\t|  got: %064b", m.Got)
    			t.Logf("\t| want: %064b", m.Want)
    		}
    		t.FailNow()
    	}
    }
    
    - Last Modified: Tue Sep 05 23:35:29 UTC 2023
    - 10.6K bytes