- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 228 for alignUp (0.21 sec)
-
src/runtime/syscall_windows.go
// caller reserved spill space. p.dstSpill = alignUp(p.dstSpill, uintptr(t.Align_)) p.dstSpill += t.Size_ } else { // Register assignment failed. // Undo the work and stack assign. p.parts = oldParts // The Go ABI aligns arguments. p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.Align_)) // Copy just the size of the argument. Note that this
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 20:12:46 UTC 2024 - 16.6K bytes - Viewed (0) -
src/runtime/stubs.go
func call1073741824(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs) func systemstack_switch() // alignUp rounds n up to a multiple of a. a must be a power of 2. // //go:nosplit func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) } // alignDown rounds n down to a multiple of a. a must be a power of 2. // //go:nosplit
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 20.2K bytes - Viewed (0) -
src/runtime/mfinal.go
okarg: // compute size needed for return parameters nret := uintptr(0) for _, t := range ft.OutSlice() { nret = alignUp(nret, uintptr(t.Align_)) + t.Size_ } nret = alignUp(nret, goarch.PtrSize) // make sure we have a finalizer goroutine createfing() systemstack(func() { if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 01:56:56 UTC 2024 - 19K bytes - Viewed (0) -
src/cmd/compile/internal/test/inl_test.go
// might not actually be inlined anywhere. want := map[string][]string{ "runtime": { "add", "acquirem", "add1", "addb", "adjustpanics", "adjustpointer", "alignDown", "alignUp", "bucketMask", "bucketShift", "chanbuf", "evacuated", "fastlog2", "float64bits", "funcspdelta", "getm", "getMCache", "isDirectIface", "itabHashFunc",
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 09 04:07:57 UTC 2024 - 10.7K bytes - Viewed (0) -
src/runtime/pinner.go
func (s *mspan) refreshPinnerBits() { p := s.getPinnerBits() if p == nil { return } hasPins := false bytes := alignUp(s.pinnerBitSize(), 8) // Iterate over each 8-byte chunk and check for pins. Note that // newPinnerBits guarantees that pinnerBits will be 8-byte aligned, so we // don't have to worry about edge cases, irrelevant bits will simply be // zero.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 04 14:29:45 UTC 2024 - 11K bytes - Viewed (0) -
src/runtime/stkframe.go
mod = datap break } } if mod == nil { throw("methodValueCallFrameObjs is not in a module") } methodValueCallFrameObjs[0] = stackObjectRecord{ off: -int32(alignUp(abiRegArgsType.Size_, 8)), // It's always the highest address local. size: int32(abiRegArgsType.Size_), _ptrdata: int32(abiRegArgsType.PtrBytes),
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 02 15:10:48 UTC 2024 - 9.9K bytes - Viewed (0) -
src/runtime/mranges.go
} return a } // takeFromFront takes len bytes from the front of the address range, aligning // the base to align first. On success, returns the aligned start of the region // taken and true. func (a *addrRange) takeFromFront(len uintptr, align uint8) (uintptr, bool) { base := alignUp(a.base.addr(), uintptr(align)) + len if base > a.limit.addr() { return 0, false } a.base = offAddr{base} return base - len, true }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 14.5K bytes - Viewed (0) -
src/runtime/export_test.go
n++ } unlock(&mheap_.lock) }) return n } func UserArenaClone[T any](s T) T { return arena_heapify(s).(T) } var AlignUp = alignUp func BlockUntilEmptyFinalizerQueue(timeout int64) bool { return blockUntilEmptyFinalizerQueue(timeout) } func FrameStartLine(f *Frame) int { return f.startLine }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:50:53 UTC 2024 - 46.1K bytes - Viewed (0) -
src/runtime/race.go
end = firstmoduledata.edata } if end < firstmoduledata.enoptrbss { end = firstmoduledata.enoptrbss } if end < firstmoduledata.ebss { end = firstmoduledata.ebss } size := alignUp(end-start, _PageSize) racecall(&__tsan_map_shadow, start, size, 0, 0) racedatastart = start racedataend = start + size return } //go:nosplit func racefini() {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:37:29 UTC 2024 - 20.4K bytes - Viewed (0) -
src/runtime/mpagealloc.go
return } // blockAlignSummaryRange aligns indices into the given level to that // level's block width (1 << levelBits[level]). It assumes lo is inclusive // and hi is exclusive, and so aligns them down and up respectively. func blockAlignSummaryRange(level int, lo, hi int) (int, int) { e := uintptr(1) << levelBits[level] return int(alignDown(uintptr(lo), e)), int(alignUp(uintptr(hi), e)) } type pageAlloc struct {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 39.2K bytes - Viewed (0)