Results 1 - 10 of 538 for alignUp (0.25 sec)

  1. src/runtime/syscall_windows.go

    		// caller reserved spill space.
    		p.dstSpill = alignUp(p.dstSpill, uintptr(t.Align_))
    		p.dstSpill += t.Size_
    	} else {
    		// Register assignment failed.
    		// Undo the work and stack assign.
    		p.parts = oldParts
    
    		// The Go ABI aligns arguments.
    		p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.Align_))
    
    		// Copy just the size of the argument. Note that this
    - Last Modified: Wed May 22 20:12:46 UTC 2024
    - 16.6K bytes
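
    The pattern in this snippet is the Go ABI's stack layout rule: before an argument is placed, the running offset is rounded up to the type's alignment, and afterwards the offset advances by the type's size. A minimal sketch of that rule, using a hypothetical argType stand-in for the runtime's type descriptor:

    package main

    import "fmt"

    func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

    // argType is a hypothetical stand-in for the runtime's type descriptor;
    // only the size and alignment matter for the layout rule shown here.
    type argType struct {
    	size, align uintptr
    }

    // stackAssign mirrors the pattern in the snippet: round the running offset
    // up to the argument's alignment, place the argument there, then advance
    // the offset by the argument's size.
    func stackAssign(args []argType) (offsets []uintptr, total uintptr) {
    	for _, t := range args {
    		total = alignUp(total, t.align)
    		offsets = append(offsets, total)
    		total += t.size
    	}
    	return offsets, total
    }

    func main() {
    	// An int8 followed by an int64: the second argument is pushed out to offset 8.
    	offs, size := stackAssign([]argType{{1, 1}, {8, 8}})
    	fmt.Println(offs, size) // [0 8] 16
    }
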
  2. src/runtime/mpagealloc_64bit.go

    	//
    	// The base address of the backing store is always page-aligned,
    	// because it comes from the OS, so it's sufficient to align the
    	// index.
    	haveMin := s.min.Load()
    	haveMax := s.max.Load()
    	needMin := alignDown(uintptr(chunkIndex(base)), physPageSize/scSize)
    	needMax := alignUp(uintptr(chunkIndex(limit)), physPageSize/scSize)
    
    - Last Modified: Wed Jan 03 11:00:10 UTC 2024
    - 9.3K bytes
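
    Here the chunk-index range is widened so that the mapped portion of the summary backing store covers whole physical pages: the low index is rounded down and the high index rounded up to a multiple of physPageSize divided by the entry size. A small sketch with hypothetical figures (4 KiB pages, 8-byte entries; the real values come from the platform and the summary structure):

    package main

    import "fmt"

    func alignUp(n, a uintptr) uintptr   { return (n + a - 1) &^ (a - 1) }
    func alignDown(n, a uintptr) uintptr { return n &^ (a - 1) }

    func main() {
    	// With 4 KiB physical pages and 8-byte summary entries, each page of
    	// the backing store holds 512 entries. To map whole pages, the low
    	// chunk index is rounded down and the high one up to a multiple of 512.
    	const physPageSize, entrySize uintptr = 4096, 8
    	entriesPerPage := physPageSize / entrySize

    	needMin := alignDown(700, entriesPerPage)
    	needMax := alignUp(1800, entriesPerPage)
    	fmt.Println(needMin, needMax) // 512 2048
    }
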
  3. src/runtime/malloc.go

    		p = alignUp(p, align)
    		p2 := sysReserve(unsafe.Pointer(p), size)
    		if p != uintptr(p2) {
    			// Must have raced. Try again.
    			sysFreeOS(p2, size)
    			if retries++; retries == 100 {
    				throw("failed to allocate aligned heap memory; too many retries")
    			}
    			goto retry
    		}
    		// Success.
    		return p2, size
    	default:
    		// Trim off the unaligned parts.
    		pAligned := alignUp(p, align)
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 59.6K bytes
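
    malloc.go shows two ways to obtain an aligned reservation: ask the OS for memory at an already-aligned hint and retry if another thread races the address, or (the default branch) over-reserve by align bytes and trim the unaligned head and tail. A sketch of the trim arithmetic only, assuming size+align bytes were reserved starting at p (trim is a hypothetical helper; the runtime releases the two slack pieces with sysFreeOS):

    package main

    import "fmt"

    func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

    // trim shows the arithmetic of the "default" branch above: an over-sized
    // reservation [p, p+size+align) is cut down to an aligned region of
    // exactly size bytes. It returns the aligned start and the slack bytes
    // at the front and back.
    func trim(p, size, align uintptr) (pAligned, front, back uintptr) {
    	pAligned = alignUp(p, align)
    	front = pAligned - p
    	back = (p + size + align) - (pAligned + size)
    	return
    }

    func main() {
    	// A reservation starting at 0x1234000, trimmed to 1 MiB alignment.
    	pAligned, front, back := trim(0x1234000, 1<<20, 1<<20)
    	fmt.Printf("%#x %#x %#x\n", pAligned, front, back) // 0x1300000 0xcc000 0x34000
    }
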
  4. src/runtime/traceregion.go

    	off  atomic.Uintptr
    }
    
    const traceRegionAllocBlockData = 64<<10 - unsafe.Sizeof(traceRegionAllocBlockHeader{})
    
    // alloc allocates n-byte block. The block is always aligned to 8 bytes, regardless of platform.
    func (a *traceRegionAlloc) alloc(n uintptr) *notInHeap {
    	n = alignUp(n, 8)
    	if n > traceRegionAllocBlockData {
    		throw("traceRegion: alloc too large")
    	}
    	if a.dropping.Load() {
    		throw("traceRegion: alloc with concurrent drop")
    	}
    - Last Modified: Wed May 08 17:47:01 UTC 2024
    - 3.2K bytes
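
    traceRegionAlloc is a bump allocator over fixed 64 KiB blocks; rounding every request up to 8 bytes keeps each returned pointer 8-byte aligned on every platform. A single-threaded sketch of the bump pattern (bumpAlloc is hypothetical; the runtime version uses an atomic offset and real block memory):

    package main

    import "fmt"

    func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

    // bumpAlloc is a simplified stand-in for the region allocator: each
    // request is rounded up to 8 bytes, so every returned offset stays
    // 8-byte aligned regardless of the sizes asked for.
    type bumpAlloc struct {
    	off, size uintptr
    }

    func (a *bumpAlloc) alloc(n uintptr) (off uintptr, ok bool) {
    	n = alignUp(n, 8)
    	if a.off+n > a.size {
    		return 0, false // block exhausted; the runtime would grab a new block
    	}
    	off = a.off
    	a.off += n
    	return off, true
    }

    func main() {
    	a := &bumpAlloc{size: 64 << 10}
    	o1, _ := a.alloc(5)  // rounded to 8
    	o2, _ := a.alloc(12) // rounded to 16
    	o3, _ := a.alloc(3)
    	fmt.Println(o1, o2, o3) // 0 8 24
    }
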
  5. src/runtime/mpagealloc_32bit.go

    		print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
    		throw("sysGrow bounds not aligned to pallocChunkBytes")
    	}
    
    	// Walk up the tree and update the summary slices.
    	for l := len(p.summary) - 1; l >= 0; l-- {
    		// Figure out what part of the summary array this new address space needs.
    		// Note that we need to align the ranges to the block width (1<<levelBits[l])
    - Last Modified: Thu Apr 20 20:08:25 UTC 2023
    - 4.6K bytes
  6. src/runtime/mem_sbrk.go

    				throw("mem: uninitialised memory")
    			}
    		}
    	}
    }
    
    func memRound(p uintptr) uintptr {
    	return alignUp(p, physPageSize)
    }
    
    func initBloc() {
    	bloc = memRound(firstmoduledata.end)
    	blocMax = bloc
    }
    
    func sysAllocOS(n uintptr) unsafe.Pointer {
    	lock(&memlock)
    	p := memAlloc(n)
    	memCheck()
    - Last Modified: Tue Aug 22 19:05:10 UTC 2023
    - 4.2K bytes
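
    On sbrk-style platforms, memRound pads the break address and every allocation out to a whole physical page. A tiny sketch, assuming a 4 KiB page size:

    package main

    import "fmt"

    func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

    func main() {
    	// Assuming a 4 KiB page size: memRound pads any break address or
    	// allocation size out to a whole page.
    	const physPageSize uintptr = 4096
    	memRound := func(p uintptr) uintptr { return alignUp(p, physPageSize) }

    	fmt.Println(memRound(1), memRound(4096), memRound(4097)) // 4096 4096 8192
    }
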
  7. src/runtime/mem_linux.go

    		// The Linux implementation requires that the address
    		// addr be page-aligned, and allows length to be zero.
    		throw("unaligned sysNoHugePageOS")
    	}
    	madvise(v, n, _MADV_NOHUGEPAGE)
    }
    
    func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) {
    	if uintptr(v)&(physPageSize-1) != 0 {
    		// The Linux implementation requires that the address
    		// addr be page-aligned, and allows length to be zero.
    		throw("unaligned sysHugePageCollapseOS")
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 5K bytes
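
    The guard in this snippet is the standard power-of-two alignment test: v&(physPageSize-1) is nonzero exactly when v is not page-aligned, which is the same as saying v != alignDown(v, physPageSize). A quick check, assuming 4 KiB pages:

    package main

    import "fmt"

    func alignDown(n, a uintptr) uintptr { return n &^ (a - 1) }

    func main() {
    	// The mask test and the alignDown comparison agree for every address.
    	const physPageSize uintptr = 4096
    	for _, v := range []uintptr{0x2000, 0x2001} {
    		fmt.Println(v&(physPageSize-1) != 0, v != alignDown(v, physPageSize))
    	}
    }
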
  8. src/runtime/stubs.go

    func call1073741824(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
    
    func systemstack_switch()
    
    // alignUp rounds n up to a multiple of a. a must be a power of 2.
    //
    //go:nosplit
    func alignUp(n, a uintptr) uintptr {
    	return (n + a - 1) &^ (a - 1)
    }
    
    // alignDown rounds n down to a multiple of a. a must be a power of 2.
    //
    //go:nosplit
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 20.2K bytes
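
    This is the definition the other hits rely on: alignUp rounds n up, and alignDown rounds n down, to a multiple of a, using the usual add-then-mask trick, which is only valid when a is a power of two. A standalone sketch with the two helpers copied out for illustration:

    package main

    import "fmt"

    // alignUp and alignDown copied out of src/runtime/stubs.go for illustration.
    // Both require a to be a power of two: adding a-1 carries n past the next
    // boundary (for alignUp), and &^ (a-1) clears the low bits.
    func alignUp(n, a uintptr) uintptr   { return (n + a - 1) &^ (a - 1) }
    func alignDown(n, a uintptr) uintptr { return n &^ (a - 1) }

    func main() {
    	fmt.Println(alignUp(13, 8), alignDown(13, 8)) // 16 8
    	fmt.Println(alignUp(16, 8), alignDown(16, 8)) // already-aligned values are unchanged: 16 16
    }
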
  9. src/runtime/mfinal.go

    okarg:
    	// compute size needed for return parameters
    	nret := uintptr(0)
    	for _, t := range ft.OutSlice() {
    		nret = alignUp(nret, uintptr(t.Align_)) + t.Size_
    	}
    	nret = alignUp(nret, goarch.PtrSize)
    
    	// make sure we have a finalizer goroutine
    	createfing()
    
    	systemstack(func() {
    		if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
    - Last Modified: Fri Jun 07 01:56:56 UTC 2024
    - 19K bytes
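
    Here alignUp lays the finalizer's return values out like struct fields: each result is aligned to its own alignment, and the total is rounded up to the pointer size before space is reserved. A sketch of that computation with a hypothetical retType stand-in for the reflected output types:

    package main

    import "fmt"

    func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

    // retType is a hypothetical stand-in for the reflected output types; only
    // size and alignment are needed to reproduce the computation above.
    type retType struct {
    	size, align uintptr
    }

    // frameSize lays the return values out back to back, aligning each to its
    // own alignment, then rounds the total up to the pointer size.
    func frameSize(rets []retType, ptrSize uintptr) uintptr {
    	var nret uintptr
    	for _, t := range rets {
    		nret = alignUp(nret, t.align) + t.size
    	}
    	return alignUp(nret, ptrSize)
    }

    func main() {
    	// A finalizer returning (bool, int64) on a 64-bit platform: the int64
    	// starts at offset 8, so 16 bytes are reserved.
    	fmt.Println(frameSize([]retType{{1, 1}, {8, 8}}, 8)) // 16
    }
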
  10. src/cmd/compile/internal/test/inl_test.go

    	// might not actually be inlined anywhere.
    	want := map[string][]string{
    		"runtime": {
    			"add",
    			"acquirem",
    			"add1",
    			"addb",
    			"adjustpanics",
    			"adjustpointer",
    			"alignDown",
    			"alignUp",
    			"bucketMask",
    			"bucketShift",
    			"chanbuf",
    			"evacuated",
    			"fastlog2",
    			"float64bits",
    			"funcspdelta",
    			"getm",
    			"getMCache",
    			"isDirectIface",
    			"itabHashFunc",
    - Last Modified: Tue Apr 09 04:07:57 UTC 2024
    - 10.7K bytes