Results 1 - 10 of 10 for sysUnused (0.29 sec)

  1. src/runtime/mem.go

    	return sysAllocOS(n)
    }
    
    // sysUnused transitions a memory region from Ready to Prepared. It notifies the
    // operating system that the physical pages backing this memory region are no
    // longer needed and can be reused for other purposes. The contents of a
    // sysUnused memory region are considered forfeit and the region must not be
    // accessed again until sysUsed is called.
    func sysUnused(v unsafe.Pointer, n uintptr) {
    Last Modified: Tue Aug 22 19:05:10 UTC 2023 - 6.7K bytes
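
    The doc comment above names two of the four region states the runtime
    tracks; per the documentation at the top of src/runtime/mem.go, the full
    set is None, Reserved, Prepared, and Ready. A minimal sketch of that
    lifecycle follows; the Go types are illustrative only, not runtime code.

    package main

    import "fmt"

    type state string

    const (
        none     state = "None"
        reserved state = "Reserved"
        prepared state = "Prepared"
        ready    state = "Ready"
    )

    func main() {
        // Each sys* call is a documented edge in the lifecycle.
        fmt.Println(none, "->", ready, "via sysAlloc")
        fmt.Println(none, "->", reserved, "via sysReserve")
        fmt.Println(reserved, "->", prepared, "via sysMap")
        fmt.Println(prepared, "->", ready, "via sysUsed")
        fmt.Println(ready, "->", prepared, "via sysUnused (contents forfeit)")
    }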
  2. src/runtime/mem_windows.go

    		n -= small
    	}
    }
    
    func sysUsedOS(v unsafe.Pointer, n uintptr) {
    	p := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
    	if p == uintptr(v) {
    		return
    	}
    
    	// Commit failed. See SysUnused.
    	// Hold on to n here so we can give back a better error message
    	// for certain cases.
    	k := n
    	for k > 0 {
    		small := k
    Last Modified: Tue Aug 22 19:05:10 UTC 2023 - 3.9K bytes
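
    Here sysUsedOS recommits pages with VirtualAlloc(MEM_COMMIT); when the
    commit fails, the loop truncated above halves the request to pinpoint the
    failing page for a better error message. A minimal user-space sketch of
    the same commit call, assuming Windows; the constants are the documented
    Win32 values, and syscall.NewLazyDLL stands in for the runtime's stdcall4.

    //go:build windows

    package main

    import (
        "fmt"
        "syscall"
    )

    const (
        memCommit     = 0x1000 // MEM_COMMIT
        memReserve    = 0x2000 // MEM_RESERVE
        pageReadwrite = 0x04   // PAGE_READWRITE
    )

    func main() {
        virtualAlloc := syscall.NewLazyDLL("kernel32.dll").NewProc("VirtualAlloc")

        // Reserve and commit one page in a single call; the runtime instead
        // recommits previously reserved addresses, passing v as the first arg.
        addr, _, callErr := virtualAlloc.Call(0, 4096, memReserve|memCommit, pageReadwrite)
        if addr == 0 {
            fmt.Println("VirtualAlloc failed:", callErr)
            return
        }
        fmt.Printf("committed one page at %#x\n", addr)
    }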
  3. src/runtime/mem_linux.go

    		// madvise will round this to any physical page
    		// *covered* by this range, so an unaligned madvise
    		// will release more memory than intended.
    		throw("unaligned sysUnused")
    	}
    
    	advise := atomic.Load(&adviseUnused)
    	if debug.madvdontneed != 0 && advise != madviseUnsupported {
    		advise = _MADV_DONTNEED
    	}
    	switch advise {
    	case _MADV_FREE:
    		if madvise(v, n, _MADV_FREE) == 0 {
    Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 5K bytes
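
    On Linux, sysUnused releases physical pages with madvise, preferring
    _MADV_FREE and falling back to _MADV_DONTNEED (which GODEBUG=madvdontneed=1
    forces, as the snippet shows). A minimal sketch of the same syscall from
    user space, assuming Linux and the golang.org/x/sys/unix package:

    //go:build linux

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Four pages; unix.Mmap returns page-aligned memory, satisfying the
        // alignment requirement the snippet above throws on when violated.
        b, err := unix.Mmap(-1, 0, 4*4096,
            unix.PROT_READ|unix.PROT_WRITE, unix.MAP_ANON|unix.MAP_PRIVATE)
        if err != nil {
            panic(err)
        }
        for i := range b {
            b[i] = 1 // dirty the pages so physical memory is allocated
        }

        // Declare the contents forfeit; the mapping stays valid but the
        // kernel may reclaim the backing pages (sysUnused's Ready -> Prepared).
        if err := unix.Madvise(b, unix.MADV_DONTNEED); err != nil {
            panic(err)
        }
        fmt.Println("after MADV_DONTNEED, b[0] =", b[0]) // 0: a fresh zero page
    }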
  4. src/runtime/mgcscavenge.go

    			// With that done, it's safe to unlock.
    			unlock(p.mheapLock)
    
    			if !p.test {
    				// Only perform sys* operations if we're not in a test.
    				// It's dangerous to do so otherwise.
    				sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)
    
    				// Update global accounting only when not in test, otherwise
    				// the runtime's accounting will be wrong.
    				nbytes := int64(npages * pageSize)
    Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes
  5. src/runtime/mpagealloc_64bit.go

    		if need.size() == 0 {
    			continue
    		}
    
    		// Map and commit need.
    		sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat)
    		sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
    		p.summaryMappedReady += need.size()
    	}
    
    	// Update the scavenge index.
    	p.summaryMappedReady += p.scav.index.sysGrow(base, limit, p.sysStat)
    }
    
    Last Modified: Wed Jan 03 11:00:10 UTC 2024 - 9.3K bytes
  6. src/runtime/mpagealloc_32bit.go

    	if reservation == nil {
    		throw("failed to reserve page summary memory")
    	}
    	// There isn't much. Just map it and mark it as used immediately.
    	sysMap(reservation, totalSize, p.sysStat)
    	sysUsed(reservation, totalSize, totalSize)
    	p.summaryMappedReady += totalSize
    
    	// Iterate over the reservation and cut it up into slices.
    	//
    	// Maintain i as the byte offset from reservation where
    Last Modified: Thu Apr 20 20:08:25 UTC 2023 - 4.6K bytes
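
    Both page-allocator variants use the same sysMap-then-sysUsed pair to take
    reserved summary memory all the way to Ready. A rough user-space analogue,
    assuming Linux and golang.org/x/sys/unix: reserve with PROT_NONE, then
    commit by changing protection (the runtime itself remaps with MAP_FIXED
    rather than calling mprotect).

    //go:build linux

    package main

    import "golang.org/x/sys/unix"

    func main() {
        // Reserved: claim address space with no access rights (like sysReserve).
        b, err := unix.Mmap(-1, 0, 1<<20, unix.PROT_NONE,
            unix.MAP_ANON|unix.MAP_PRIVATE)
        if err != nil {
            panic(err)
        }

        // Prepared -> Ready: grant access (standing in for sysMap + sysUsed).
        if err := unix.Mprotect(b, unix.PROT_READ|unix.PROT_WRITE); err != nil {
            panic(err)
        }
        b[0] = 42 // now safe to touch
    }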
  7. src/runtime/mheap.go

    	// Commit and account for any scavenged memory that the span now owns.
    	nbytes := npages * pageSize
    	if scav != 0 {
    		// sysUsed all the pages that are actually available
    		// in the span since some of them might be scavenged.
    		sysUsed(unsafe.Pointer(base), nbytes, scav)
    		gcController.heapReleased.add(-int64(scav))
    	}
    	// Update stats.
    	gcController.heapFree.add(-int64(nbytes - scav))
    Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes
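
    A worked example of the two accounting updates above, with hypothetical
    numbers (the constants are stand-ins, not values taken from the runtime):

    package main

    import "fmt"

    func main() {
        const (
            npages   = 4
            pageSize = 8192
            scav     = 16384 // two of the four pages had been scavenged
        )
        nbytes := npages * pageSize // 32768 bytes now owned by the span

        // Mirrors mheap.go: scavenged bytes leave heapReleased,
        // the remainder leaves heapFree.
        fmt.Println("heapReleased delta:", -scav)        // -16384
        fmt.Println("heapFree delta:", -(nbytes - scav)) // -16384
    }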
  8. src/runtime/arena.go

    	//
    	// Unlike (*mheap).grow, just map in everything that we
    	// asked for. We're likely going to use it all.
    	sysMap(unsafe.Pointer(base), userArenaChunkBytes, &gcController.heapReleased)
    	sysUsed(unsafe.Pointer(base), userArenaChunkBytes, userArenaChunkBytes)
    
    	// Model the user arena as a heap span for a large object.
    	spc := makeSpanClass(0, false)
    	h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
    Last Modified: Wed May 08 17:44:56 UTC 2024 - 37.9K bytes
  9. src/runtime/export_test.go

    	// Free extra data structures.
    	sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
    
    	// Subtract back out whatever we mapped for the summaries.
    	// sysUsed adds to p.sysStat and memstats.mappedReady no matter what
    	// (and in anger should actually be accounted for), and there's no other
    	// way to figure out how much we actually mapped.
    Last Modified: Thu May 30 17:50:53 UTC 2024 - 46.1K bytes
  10. src/runtime/malloc.go

    	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
    		if l.mapMemory {
    			// Transition from Reserved to Prepared to Ready.
    			n := pEnd - l.mapped
    			sysMap(unsafe.Pointer(l.mapped), n, sysStat)
    			sysUsed(unsafe.Pointer(l.mapped), n, n)
    		}
    		l.mapped = pEnd
    	}
    	return unsafe.Pointer(p)
    }
    
    // notInHeap is off-heap memory allocated by a lower-level allocator
    // like sysAlloc or persistentAlloc.
    Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes
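
    The alignUp call above rounds the mapped boundary up to a whole physical
    page before the Reserved-to-Prepared-to-Ready transition. A self-contained
    sketch of that helper, assuming the usual power-of-two round-up (the
    runtime's own alignUp lives elsewhere in the source; this one is
    illustrative):

    package main

    import "fmt"

    // alignUp rounds n up to the next multiple of a; a must be a power of two.
    func alignUp(n, a uintptr) uintptr {
        return (n + a - 1) &^ (a - 1)
    }

    func main() {
        const physPageSize = 4096 // hypothetical; the runtime queries the OS
        fmt.Println(alignUp(5000, physPageSize)) // 8192: extend to a whole page
        fmt.Println(alignUp(8192, physPageSize)) // 8192: already aligned
    }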