Results 11 - 20 of 538 for alignUp (0.44 sec)
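
None of the hits on this page include the definition itself. For reference, alignUp and its counterpart alignDown live in src/runtime/stubs.go and are essentially the standard power-of-two rounding idiom:

    // From src/runtime/stubs.go (not part of these search results):

    // alignUp rounds n up to a multiple of a. a must be a power of 2.
    func alignUp(n, a uintptr) uintptr {
        return (n + a - 1) &^ (a - 1)
    }

    // alignDown rounds n down to a multiple of a. a must be a power of 2.
    func alignDown(n, a uintptr) uintptr {
        return n &^ (a - 1)
    }

Every call site below leans on the power-of-two precondition: the &^ (a - 1) mask clears the low bits, and adding a - 1 first turns that truncation into rounding up.
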

  1. src/runtime/pinner.go

    func (s *mspan) refreshPinnerBits() {
    	p := s.getPinnerBits()
    	if p == nil {
    		return
    	}
    
    	hasPins := false
    	bytes := alignUp(s.pinnerBitSize(), 8)
    
    	// Iterate over each 8-byte chunk and check for pins. Note that
    	// newPinnerBits guarantees that pinnerBits will be 8-byte aligned, so we
    	// don't have to worry about edge cases, irrelevant bits will simply be
    	// zero.
    Last Modified: Thu Apr 04 14:29:45 UTC 2024 - 11K bytes
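
    The trick in this excerpt can be reproduced with an ordinary byte slice: pad the bitmap length up to a multiple of 8, and any non-zero 64-bit chunk means at least one pin is set. A minimal standalone sketch (buffer size and pin position are invented; the runtime reads its chunks through unsafe pointers rather than encoding/binary):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

    func main() {
        const nbytes = 13 // hypothetical pinner-bit size in bytes
        // Pad to a multiple of 8 so the chunked scan below never reads
        // past the end; the padding bytes stay zero and cannot report
        // a spurious pin.
        buf := make([]byte, alignUp(nbytes, 8))
        buf[9] = 0x04 // pretend one object is pinned

        hasPins := false
        for i := 0; i < len(buf); i += 8 {
            if binary.LittleEndian.Uint64(buf[i:i+8]) != 0 {
                hasPins = true
                break
            }
        }
        fmt.Println(hasPins) // true
    }
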
  2. src/runtime/stkframe.go

    			mod = datap
    			break
    		}
    	}
    	if mod == nil {
    		throw("methodValueCallFrameObjs is not in a module")
    	}
    	methodValueCallFrameObjs[0] = stackObjectRecord{
    		off:       -int32(alignUp(abiRegArgsType.Size_, 8)), // It's always the highest address local.
    		size:      int32(abiRegArgsType.Size_),
    		_ptrdata:  int32(abiRegArgsType.PtrBytes),
    Last Modified: Tue Apr 02 15:10:48 UTC 2024 - 9.9K bytes
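
    Here alignUp keeps the stack object's offset aligned to the stack word: the record is addressed by a negative offset from the highest address in the frame, so the struct's size is rounded up before negating. A toy computation with an invented size (the real abiRegArgsType.Size_ is architecture dependent):

    package main

    import "fmt"

    func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

    func main() {
        size := uintptr(13) // hypothetical stand-in for abiRegArgsType.Size_
        off := -int32(alignUp(size, 8))
        fmt.Println(off) // -16: the object starts 16 bytes below the frame top
    }
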
  3. src/runtime/mgcscavenge.go

    	} else if minimum > maxPagesPerPhysPage {
    		print("runtime: min = ", minimum, "\n")
    		throw("min too large")
    	}
    	// max may not be min-aligned, so we might accidentally truncate to
    	// a max value which causes us to return a non-min-aligned value.
    	// To prevent this, align max up to a multiple of min (which is always
    	// a power of 2). This also prevents max from ever being less than
    Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes
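
    The comment is easiest to see with numbers. If min is 4 pages and max is 6, truncating a candidate run to max could leave a length that is not a multiple of min; rounding max up to the next multiple of min removes that case, and as a side effect max can never end up below min. A sketch with invented page counts:

    package main

    import "fmt"

    func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

    func main() {
        minPages := uintptr(4) // always a power of 2 in the runtime
        maxPages := uintptr(6) // not min-aligned
        maxPages = alignUp(maxPages, minPages)
        fmt.Println(maxPages) // 8: truncating to max now stays min-aligned
    }
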
  4. src/runtime/mranges.go

    	}
    	return a
    }
    
    // takeFromFront takes len bytes from the front of the address range, aligning
    // the base to align first. On success, returns the aligned start of the region
    // taken and true.
    func (a *addrRange) takeFromFront(len uintptr, align uint8) (uintptr, bool) {
    	base := alignUp(a.base.addr(), uintptr(align)) + len
    	if base > a.limit.addr() {
    		return 0, false
    	}
    	a.base = offAddr{base}
    	return base - len, true
    }
    
    Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 14.5K bytes
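
    takeFromFront is self-contained enough to model outside the runtime. A sketch with plain uintptrs in place of offAddr (the range bounds are invented):

    package main

    import "fmt"

    func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

    type addrRange struct{ base, limit uintptr } // models [base, limit)

    // takeFromFront mirrors the runtime logic: align the base up, reserve
    // len bytes, and report the aligned start if the range still fits.
    func (a *addrRange) takeFromFront(len uintptr, align uint8) (uintptr, bool) {
        base := alignUp(a.base, uintptr(align)) + len
        if base > a.limit {
            return 0, false
        }
        a.base = base
        return base - len, true
    }

    func main() {
        r := addrRange{base: 0x1003, limit: 0x2000}
        start, ok := r.takeFromFront(0x100, 16)
        fmt.Printf("%#x %v %#x\n", start, ok, r.base) // 0x1010 true 0x1110
    }
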
  5. src/runtime/mbitmap.go

    			if span.spanclass.sizeclass() != 0 {
    				throw("GCProg for type that isn't large")
    			}
    			spaceNeeded := alignUp(unsafe.Sizeof(_type{}), goarch.PtrSize)
    			heapBitsOff := spaceNeeded
    			spaceNeeded += alignUp(typ.PtrBytes/goarch.PtrSize/8, goarch.PtrSize)
    			npages := alignUp(spaceNeeded, pageSize) / pageSize
    			var progSpan *mspan
    			systemstack(func() {
    Last Modified: Thu May 23 00:18:55 UTC 2024 - 60K bytes
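
    The three alignUp calls size an allocation that holds a copy of the type header followed by its unrolled pointer bitmap: the header is pointer-aligned, the bitmap (one bit per pointer word, packed into bytes) is appended, and the total is rounded up to whole pages. Worked through with invented sizes on a 64-bit layout:

    package main

    import "fmt"

    func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

    func main() {
        const ptrSize = 8            // goarch.PtrSize on 64-bit
        const pageSize = 8192        // the runtime's page size
        typeSize := uintptr(96)      // hypothetical unsafe.Sizeof(_type{})
        ptrBytes := uintptr(1 << 20) // hypothetical typ.PtrBytes

        spaceNeeded := alignUp(typeSize, ptrSize) // header, pointer-aligned
        heapBitsOff := spaceNeeded                // bitmap starts right after
        spaceNeeded += alignUp(ptrBytes/ptrSize/8, ptrSize)
        npages := alignUp(spaceNeeded, pageSize) / pageSize

        fmt.Println(heapBitsOff, spaceNeeded, npages) // 96 16480 3
    }
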
  6. src/runtime/export_test.go

    			n++
    		}
    		unlock(&mheap_.lock)
    	})
    	return n
    }
    
    func UserArenaClone[T any](s T) T {
    	return arena_heapify(s).(T)
    }
    
    var AlignUp = alignUp
    
    func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
    	return blockUntilEmptyFinalizerQueue(timeout)
    }
    
    func FrameStartLine(f *Frame) int {
    	return f.startLine
    }
    
    Last Modified: Thu May 30 17:50:53 UTC 2024 - 46.1K bytes
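
    The line var AlignUp = alignUp is the runtime's export-for-test pattern: export_test.go is compiled only with the package's own tests, so it can hand unexported identifiers to the external runtime_test package without widening the real API. The same pattern in an arbitrary package, shown as two files in one block (mylib and its import path are hypothetical names):

    // export_test.go in package mylib, built only during tests.
    package mylib

    // AlignUp re-exports the unexported helper for the external test package.
    var AlignUp = alignUp

    // alignup_test.go in the external test package.
    package mylib_test

    import (
        "testing"

        "example.com/mylib"
    )

    func TestAlignUp(t *testing.T) {
        if got := mylib.AlignUp(13, 8); got != 16 {
            t.Fatalf("AlignUp(13, 8) = %d, want 16", got)
        }
    }
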
  7. src/runtime/race.go

    		end = firstmoduledata.edata
    	}
    	if end < firstmoduledata.enoptrbss {
    		end = firstmoduledata.enoptrbss
    	}
    	if end < firstmoduledata.ebss {
    		end = firstmoduledata.ebss
    	}
    	size := alignUp(end-start, _PageSize)
    	racecall(&__tsan_map_shadow, start, size, 0, 0)
    	racedatastart = start
    	racedataend = start + size
    
    	return
    }
    
    //go:nosplit
    func racefini() {
    Last Modified: Fri May 17 18:37:29 UTC 2024 - 20.4K bytes
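
    Before handing the Go data/bss region to TSan, the runtime widens it to whole pages, since shadow memory is mapped at page granularity. The rounding in isolation, with invented addresses and a 4 KiB page:

    package main

    import "fmt"

    func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

    func main() {
        start := uintptr(0x400000)        // hypothetical start of the region
        end := uintptr(0x400000 + 10_000) // largest of edata/enoptrbss/ebss
        size := alignUp(end-start, 4096)  // hypothetical _PageSize
        fmt.Printf("%#x\n", size) // 0x3000: 10000 bytes become three pages
    }
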
  8. src/runtime/mheap.go

    	// of MiB (generally >= to the huge page size) we
    	// won't be calling it too much.
    	ask := alignUp(npage, pallocChunkPages) * pageSize
    
    	totalGrowth := uintptr(0)
    	// This may overflow because ask could be very large
    	// and is otherwise unrelated to h.curArena.base.
    	end := h.curArena.base + ask
    	nBase := alignUp(end, physPageSize)
    	if nBase > h.curArena.end || /* overflow */ end < h.curArena.base {
    Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes
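
    The excerpt pairs alignUp with a wraparound test: uintptr addition overflows silently, so after computing end = base + ask the runtime also checks end < h.curArena.base. The idiom on its own, with the base pushed artificially close to the top of the address space:

    package main

    import "fmt"

    func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

    func main() {
        base := ^uintptr(0) - 100 // contrived arena base near the address-space top
        ask := uintptr(8192)

        end := base + ask              // wraps around
        nBase := alignUp(end, 4096)    // hypothetical physPageSize
        fmt.Println(nBase, end < base) // 8192 true: the addition overflowed
    }
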
  9. src/runtime/mpagealloc.go

    	return
    }
    
    // blockAlignSummaryRange aligns indices into the given level to that
    // level's block width (1 << levelBits[level]). It assumes lo is inclusive
    // and hi is exclusive, and so aligns them down and up respectively.
    func blockAlignSummaryRange(level int, lo, hi int) (int, int) {
    	e := uintptr(1) << levelBits[level]
    	return int(alignDown(uintptr(lo), e)), int(alignUp(uintptr(hi), e))
    }
    
    type pageAlloc struct {
    Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 39.2K bytes
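
    blockAlignSummaryRange widens a half-open index range to whole blocks: the inclusive lower bound is aligned down and the exclusive upper bound is aligned up, so no original index falls outside the result. A demo with an invented block width:

    package main

    import "fmt"

    func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }
    func alignDown(n, a uintptr) uintptr { return n &^ (a - 1) }

    func main() {
        const blockWidth = 8 // hypothetical 1 << levelBits[level]
        lo, hi := 10, 21     // half-open range [10, 21)
        alo := int(alignDown(uintptr(lo), blockWidth))
        ahi := int(alignUp(uintptr(hi), blockWidth))
        fmt.Println(alo, ahi) // 8 24: [8, 24) covers all of [10, 21)
    }
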
  10. src/runtime/cgocall.go

    	if *restore {
    		// Restore sp saved by cgocallback during
    		// unwind of g's stack (see comment at top of file).
    		mp := acquirem()
    		sched := &mp.g0.sched
    		sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + alignUp(sys.MinFrameSize, sys.StackAlign)))
    
    		// Do the accounting that cgocall will not have a chance to do
    		// during an unwind.
    		//
    		// In the case where a Go call originates from C, ncgo is 0
    Last Modified: Thu May 23 01:16:47 UTC 2024 - 24.2K bytes
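
    The offset alignUp(sys.MinFrameSize, sys.StackAlign) locates the slot just above g0's minimum frame where cgocallback stashed the old sp; the rounding keeps that slot stack-aligned on architectures where the minimum frame size is not already a multiple of the stack alignment. The arithmetic alone, with invented per-architecture constants:

    package main

    import "fmt"

    func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

    func main() {
        minFrameSize := uintptr(4) // hypothetical sys.MinFrameSize
        stackAlign := uintptr(8)   // hypothetical sys.StackAlign
        fmt.Println(alignUp(minFrameSize, stackAlign)) // 8: the saved-sp slot offset
    }
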