Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 9 of 9 for alignDown (0.2 sec)

  1. src/runtime/mpagecache.go

    		chunk = p.chunkOf(ci)
    		j, _ := chunk.find(1, chunkPageIndex(p.searchAddr.addr()))
    		if j == ^uint(0) {
    			throw("bad summary data")
    		}
    		c = pageCache{
    			base:  chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
    			cache: ^chunk.pages64(j),
    			scav:  chunk.scavenged.block64(j),
    		}
    	} else {
    		// Slow path: the searchAddr address had nothing there, so go find
    		// the first free page the slow way.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Apr 19 14:30:00 UTC 2023
    - 5.6K bytes
    - Viewed (0)
  2. src/runtime/mpagealloc_64bit.go

    	// level of p.summary into page-aligned addresses which cover that
    	// range of indices.
    	summaryRangeToSumAddrRange := func(level, sumIdxBase, sumIdxLimit int) addrRange {
    		baseOffset := alignDown(uintptr(sumIdxBase)*pallocSumBytes, physPageSize)
    		limitOffset := alignUp(uintptr(sumIdxLimit)*pallocSumBytes, physPageSize)
    		base := unsafe.Pointer(&p.summary[level][0])
    		return addrRange{
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Jan 03 11:00:10 UTC 2024
    - 9.3K bytes
    - Viewed (0)
  3. src/runtime/mem_linux.go

    // sysHugePageOS advises the kernel to back the huge-page-aligned
    // portion of [v, v+n) with transparent huge pages. It is a no-op on
    // systems where the physical huge page size is unknown.
    func sysHugePageOS(v unsafe.Pointer, n uintptr) {
    	if physHugePageSize == 0 {
    		return
    	}
    	// Shrink the region inward to the largest subrange whose start and
    	// end both fall on a huge page boundary.
    	lo := alignUp(uintptr(v), physHugePageSize)
    	hi := alignDown(uintptr(v)+n, physHugePageSize)
    	if lo < hi {
    		madvise(unsafe.Pointer(lo), hi-lo, _MADV_HUGEPAGE)
    	}
    }
    
    func sysNoHugePageOS(v unsafe.Pointer, n uintptr) {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 5K bytes
    - Viewed (0)
  4. src/runtime/stubs.go

    // alignUp returns the smallest multiple of a that is >= n.
    // a must be a power of 2.
    //
    //go:nosplit
    func alignUp(n, a uintptr) uintptr {
    	mask := a - 1
    	return (n + mask) &^ mask
    }
    
    // alignDown returns the largest multiple of a that is <= n.
    // a must be a power of 2.
    //
    //go:nosplit
    func alignDown(n, a uintptr) uintptr {
    	mask := a - 1
    	return n & ^mask
    }
    
    // divRoundUp returns ceil(n / a).
    func divRoundUp(n, a uintptr) uintptr {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 20.2K bytes
    - Viewed (0)
  5. src/cmd/compile/internal/test/inl_test.go

    	// might not actually be inlined anywhere.
    	want := map[string][]string{
    		"runtime": {
    			"add",
    			"acquirem",
    			"add1",
    			"addb",
    			"adjustpanics",
    			"adjustpointer",
    			"alignDown",
    			"alignUp",
    			"bucketMask",
    			"bucketShift",
    			"chanbuf",
    			"evacuated",
    			"fastlog2",
    			"float64bits",
    			"funcspdelta",
    			"getm",
    			"getMCache",
    			"isDirectIface",
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Apr 09 04:07:57 UTC 2024
    - 10.7K bytes
    - Viewed (0)
  6. src/runtime/mranges.go

    // the limit to align after subtracting len. On success, returns the aligned
    // start of the region taken and true.
    func (a *addrRange) takeFromBack(len uintptr, align uint8) (uintptr, bool) {
    	// Candidate new limit: remove len bytes from the end, then round
    	// down so the taken region starts on an align boundary.
    	newLimit := alignDown(a.limit.addr()-len, uintptr(align))
    	if newLimit < a.base.addr() {
    		// The range is too small to supply len aligned bytes.
    		return 0, false
    	}
    	a.limit = offAddr{newLimit}
    	return newLimit, true
    }
    
    // removeGreaterEqual removes all addresses in a greater than or equal
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 14.5K bytes
    - Viewed (0)
  7. src/runtime/mbitmap.go

    		// tp.addr + n.
    		oldelem := tp.elem
    		tp.elem += (tp.addr - tp.elem + n) / tp.typ.Size_ * tp.typ.Size_
    		tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize)
    	} else {
    		tp.addr += alignDown(n, ptrBits*goarch.PtrSize)
    	}
    
    	if tp.addr-tp.elem >= tp.typ.PtrBytes {
    		// We're starting in the non-pointer area of an array.
    		// Move up to the next element.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 60K bytes
    - Viewed (0)
  8. src/runtime/mpagealloc.go

    // and hi is exclusive, and so aligns them down and up respectively.
    func blockAlignSummaryRange(level int, lo, hi int) (int, int) {
    	// One summary block at this level spans this many entries.
    	entries := uintptr(1) << levelBits[level]
    	alignedLo := int(alignDown(uintptr(lo), entries))
    	alignedHi := int(alignUp(uintptr(hi), entries))
    	return alignedLo, alignedHi
    }
    
    type pageAlloc struct {
    	// Radix tree of summaries.
    	//
    	// Each slice's cap represents the whole memory reservation.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 39.2K bytes
    - Viewed (0)
  9. src/runtime/mgcscavenge.go

    		// If that boundary is within our current candidate, then we may be breaking
    		// a huge page.
    		if hugePageAbove <= end {
    			// Compute the huge page boundary below our candidate.
    			hugePageBelow := uint(alignDown(uintptr(start), pagesPerHugePage))
    
    			if hugePageBelow >= end-run {
    				// We're in danger of breaking apart a huge page since start+size crosses
    				// a huge page boundary and rounding down start to the nearest huge
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:48:45 UTC 2024
    - 52.3K bytes
    - Viewed (0)
Back to top