Results 1 - 10 of 33 for sysMap (0.2 sec)

  1. src/runtime/runtime_mmap_test.go

    import (
    	"runtime"
    	"testing"
    	"unsafe"
    )
    
    // Test that the error value returned by mmap is positive, as that is
    // what the code in mem_bsd.go, mem_darwin.go, and mem_linux.go expects.
    // See the uses of ENOMEM in sysMap in those files.
    func TestMmapErrorSign(t *testing.T) {
    	p, err := runtime.Mmap(nil, ^uintptr(0)&^(runtime.GetPhysPageSize()-1), 0, runtime.MAP_ANON|runtime.MAP_PRIVATE, -1, 0)
    
    	if p != nil || err != runtime.ENOMEM {
    - Last Modified: Tue Mar 29 16:24:51 UTC 2022
    - 1.8K bytes
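
    The test above asks runtime.Mmap for an impossibly large anonymous mapping and expects the failure to surface as ENOMEM. A minimal sketch of the same behavior outside the runtime, using the public syscall package and assuming a 64-bit Unix-like platform (the size constant is my choice, not the test's):

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        // Request an anonymous mapping far larger than any usable 64-bit
        // address space; the kernel rejects it and the error comes back
        // as syscall.ENOMEM, the same value the runtime test checks for.
        const tooBig = 1 << 62
        _, err := syscall.Mmap(-1, 0, tooBig,
            syscall.PROT_READ|syscall.PROT_WRITE,
            syscall.MAP_ANON|syscall.MAP_PRIVATE)
        fmt.Println("mmap error:", err)
        fmt.Println("is ENOMEM:", err == syscall.ENOMEM)
    }
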
  2. src/runtime/mem_aix.go

    	if err != 0 {
    		return nil
    	}
    	return p
    }
    
    func sysMapOS(v unsafe.Pointer, n uintptr) {
    	// AIX does not allow mapping a range that is already mapped.
    	// So, call mprotect to change permissions.
    	// Note that sysMap is always called with a non-nil pointer
    	// since it transitions a Reserved memory region to Prepared,
    	// so mprotect is always possible.
    	_, err := mprotect(v, n, _PROT_READ|_PROT_WRITE)
    	if err == _ENOMEM {
    - Last Modified: Tue Aug 22 19:05:10 UTC 2023
    - 2K bytes
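
    As a rough illustration of the AIX approach above (reserve once, then make the pages usable by changing protections rather than mapping the range again), here is a sketch with the portable syscall wrappers; it is my approximation on a generic Unix system, not the runtime's code path:

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        const size = 1 << 20 // 1 MiB

        // "Reserve": map address space with no access rights,
        // analogous to what sysReserve does.
        region, err := syscall.Mmap(-1, 0, size,
            syscall.PROT_NONE,
            syscall.MAP_ANON|syscall.MAP_PRIVATE)
        if err != nil {
            panic(err)
        }

        // "Map"/commit: change the protection of the existing mapping
        // instead of mmapping the range a second time, which is the
        // approach mem_aix.go takes because AIX rejects overlapping mmaps.
        if err := syscall.Mprotect(region, syscall.PROT_READ|syscall.PROT_WRITE); err != nil {
            panic(err)
        }

        region[0] = 42 // the page is now usable
        fmt.Println(region[0])
        _ = syscall.Munmap(region)
    }
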
  3. src/runtime/mem.go

    func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
    	return sysReserveOS(v, n)
    }
    
    // sysMap transitions a memory region from Reserved to Prepared. It ensures the
    // memory region can be efficiently transitioned to Ready.
    //
    // sysStat must be non-nil.
    func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
    	sysStat.add(int64(n))
    	sysMapOS(v, n)
    - Last Modified: Tue Aug 22 19:05:10 UTC 2023
    - 6.7K bytes
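
    A minimal sketch of the shape of sysMap shown above: account the bytes in a stat first, then hand the region to an OS-specific step. The names below (memStat, mapOS, sysMapLike) are stand-ins I made up to mirror the structure, not runtime internals:

    package main

    import (
        "fmt"
        "sync/atomic"
        "unsafe"
    )

    // memStat mimics the bookkeeping role of the runtime's sysMemStat:
    // every transition records how many bytes it affected.
    type memStat struct{ bytes atomic.Int64 }

    func (s *memStat) add(n int64) { s.bytes.Add(n) }

    // mapOS stands in for the per-OS sysMapOS; here it only reports.
    func mapOS(v unsafe.Pointer, n uintptr) {
        fmt.Printf("would commit %d bytes at %p\n", n, v)
    }

    // sysMapLike mirrors the structure of runtime.sysMap: account first,
    // then defer to the OS-specific implementation.
    func sysMapLike(v unsafe.Pointer, n uintptr, stat *memStat) {
        stat.add(int64(n))
        mapOS(v, n)
    }

    func main() {
        var stat memStat
        buf := make([]byte, 4096)
        sysMapLike(unsafe.Pointer(&buf[0]), uintptr(len(buf)), &stat)
        fmt.Println("mapped bytes accounted:", stat.bytes.Load())
    }
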
  4. src/runtime/mpagealloc_32bit.go

    	reservation := sysReserve(nil, totalSize)
    	if reservation == nil {
    		throw("failed to reserve page summary memory")
    	}
    	// There isn't much. Just map it and mark it as used immediately.
    	sysMap(reservation, totalSize, p.sysStat)
    	sysUsed(reservation, totalSize, totalSize)
    	p.summaryMappedReady += totalSize
    
    	// Iterate over the reservation and cut it up into slices.
    	//
    - Last Modified: Thu Apr 20 20:08:25 UTC 2023
    - 4.6K bytes
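
    The snippet reserves and maps the whole summary area in one sysMap call and then cuts it up. A small sketch of that slicing step with unsafe.Slice, using made-up level sizes and a plain Go slice as the backing region:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        // Stand-in for the single reservation mapped in one sysMap call;
        // the level sizes are arbitrary.
        levelCounts := []int{4, 16, 64}
        total := 0
        for _, c := range levelCounts {
            total += c
        }
        backing := make([]uint64, total)
        base := unsafe.Pointer(&backing[0])

        // Cut the one contiguous region into per-level slices, the same
        // "iterate over the reservation and cut it up" step as above.
        off := 0
        summaries := make([][]uint64, len(levelCounts))
        for i, c := range levelCounts {
            p := (*uint64)(unsafe.Add(base, off*int(unsafe.Sizeof(uint64(0)))))
            summaries[i] = unsafe.Slice(p, c)
            off += c
        }
        for i, s := range summaries {
            fmt.Printf("level %d: %d entries\n", i, len(s))
        }
    }
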
  5. src/runtime/mpagealloc_64bit.go

    		}
    		// It's possible that after our pruning above, there's nothing new to map.
    		if need.size() == 0 {
    			continue
    		}
    
    		// Map and commit need.
    		sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat)
    		sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
    		p.summaryMappedReady += need.size()
    	}
    
    	// Update the scavenge index.
    - Last Modified: Wed Jan 03 11:00:10 UTC 2024
    - 9.3K bytes
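
    A sketch of the pruning logic implied above: intersect the requested range with what is already mapped so that sysMap only runs on the missing tail. The rng type is a simplified stand-in for the runtime's addrRange, assuming the mapped region only grows upward:

    package main

    import "fmt"

    // rng is a simplified stand-in for the runtime's addrRange: [base, limit).
    type rng struct{ base, limit uintptr }

    func (r rng) size() uintptr {
        if r.limit <= r.base {
            return 0
        }
        return r.limit - r.base
    }

    // prune removes the part of need that mapped already covers.
    func prune(need, mapped rng) rng {
        if need.base < mapped.limit {
            need.base = mapped.limit
        }
        return need
    }

    func main() {
        mapped := rng{0x1000, 0x5000}
        need := prune(rng{0x3000, 0x8000}, mapped)
        if need.size() == 0 {
            fmt.Println("nothing new to map")
            return
        }
        // In the runtime this is where sysMap/sysUsed would run on need.
        fmt.Printf("map [%#x, %#x): %d bytes\n", need.base, need.limit, need.size())
    }
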
  6. src/runtime/mheap.go

    //
    // h.lock must be held.
    func (h *mheap) grow(npage uintptr) (uintptr, bool) {
    	assertLockHeld(&h.lock)
    
    	// We must grow the heap in whole palloc chunks.
    	// We call sysMap below but note that because we
    	// round up to pallocChunkPages which is on the order
    	// of MiB (generally >= to the huge page size) we
    	// won't be calling it too much.
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
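
    The comment explains that growth is rounded up to whole palloc chunks so sysMap runs on MiB-scale multiples rather than per page. A small sketch of that rounding, with 4 MiB standing in for the chunk size (the real value is derived from pallocChunkPages and the runtime page size):

    package main

    import "fmt"

    // alignUp rounds n up to a multiple of a, where a is a power of two.
    // It matches the helper the runtime uses when sizing heap growth.
    func alignUp(n, a uintptr) uintptr {
        return (n + a - 1) &^ (a - 1)
    }

    func main() {
        const chunkBytes = 4 << 20 // assumed chunk granularity
        for _, ask := range []uintptr{1 << 12, 1 << 20, 5 << 20} {
            fmt.Printf("ask %8d bytes -> grow %8d bytes\n", ask, alignUp(ask, chunkBytes))
        }
    }
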
  7. src/runtime/arena.go

    	// reusing is set to fault (so, also Reserved), so transition
    	// it to Prepared and then Ready.
    	//
    	// Unlike (*mheap).grow, just map in everything that we
    	// asked for. We're likely going to use it all.
    	sysMap(unsafe.Pointer(base), userArenaChunkBytes, &gcController.heapReleased)
    	sysUsed(unsafe.Pointer(base), userArenaChunkBytes, userArenaChunkBytes)
    
    	// Model the user arena as a heap span for a large object.
    - Last Modified: Wed May 08 17:44:56 UTC 2024
    - 37.9K bytes
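
    The chunk being mapped here backs Go's experimental user arenas. A hedged usage sketch of the user-facing side of that path; the arena package is experimental, requires building with GOEXPERIMENT=arenas, and its API may have changed:

    // Build with GOEXPERIMENT=arenas.
    package main

    import (
        "arena"
        "fmt"
    )

    type point struct{ x, y int }

    func main() {
        a := arena.NewArena()    // backed by chunks mapped as in the snippet
        p := arena.New[point](a) // allocate a *point inside the arena
        p.x, p.y = 3, 4
        fmt.Println(*p)
        a.Free() // release the arena's memory in one step
    }
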
  8. src/runtime/malloc.go

    		return nil
    	}
    	l.next = p + size
    	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
    		if l.mapMemory {
    			// Transition from Reserved to Prepared to Ready.
    			n := pEnd - l.mapped
    			sysMap(unsafe.Pointer(l.mapped), n, sysStat)
    			sysUsed(unsafe.Pointer(l.mapped), n, n)
    		}
    		l.mapped = pEnd
    	}
    	return unsafe.Pointer(p)
    }
    
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 59.6K bytes
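
    The excerpt is the core of a bump allocator that extends its mapped frontier lazily. A self-contained sketch of the same pattern, with the sysMap/sysUsed step replaced by a print so it runs anywhere; the field names mirror the snippet but the type is mine:

    package main

    import "fmt"

    const pageSize = 4096

    func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

    // linear mimics the shape of the runtime's linearAlloc: a bump pointer
    // over a reserved region, with the mapped frontier advanced lazily.
    type linear struct {
        next   uintptr // next free byte
        mapped uintptr // one past the last mapped (usable) byte
        end    uintptr // end of the reserved region
    }

    func (l *linear) alloc(size, align uintptr) (uintptr, bool) {
        p := alignUp(l.next, align)
        if p+size > l.end {
            return 0, false
        }
        l.next = p + size
        if pEnd := alignUp(l.next-1, pageSize); pEnd > l.mapped {
            // In the runtime this is the Reserved -> Prepared -> Ready
            // transition: sysMap followed by sysUsed on the new pages.
            fmt.Printf("map %d more bytes at %#x\n", pEnd-l.mapped, l.mapped)
            l.mapped = pEnd
        }
        return p, true
    }

    func main() {
        l := &linear{next: 0x100000, mapped: 0x100000, end: 0x180000}
        for i := 0; i < 3; i++ {
            p, ok := l.alloc(3000, 8)
            fmt.Printf("alloc #%d -> %#x ok=%v\n", i, p, ok)
        }
    }
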
  9. src/runtime/cgo_mmap.go

    		}
    		return unsafe.Pointer(ret), 0
    	}
    	return sysMmap(addr, n, prot, flags, fd, off)
    }
    
    func munmap(addr unsafe.Pointer, n uintptr) {
    	if _cgo_munmap != nil {
    		systemstack(func() { callCgoMunmap(addr, n) })
    		return
    	}
    	sysMunmap(addr, n)
    }
    
    // sysMmap calls the mmap system call. It is implemented in assembly.
    - Last Modified: Fri Aug 25 20:58:13 UTC 2023
    - 2.4K bytes
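
    The excerpt dispatches mmap/munmap through a cgo-installed interceptor when one exists and falls back to the raw system call otherwise. A toy sketch of that hook-with-fallback pattern; the signatures are simplified and the system-stack switch the runtime performs is omitted:

    package main

    import "fmt"

    // mmapFunc is a simplified stand-in for the mmap signature dispatched
    // in cgo_mmap.go.
    type mmapFunc func(addr, n uintptr) (uintptr, int)

    // hook plays the role of _cgo_mmap: non-nil only when an interceptor
    // (in the runtime's case, a C-side hook) has been installed.
    var hook mmapFunc

    func directMmap(addr, n uintptr) (uintptr, int) {
        fmt.Println("direct mmap path")
        return 0xdead0000, 0
    }

    func doMmap(addr, n uintptr) (uintptr, int) {
        if hook != nil {
            return hook(addr, n)
        }
        return directMmap(addr, n)
    }

    func main() {
        doMmap(0, 4096) // no interceptor installed: direct path

        hook = func(addr, n uintptr) (uintptr, int) {
            fmt.Println("intercepted mmap path")
            return 0xbeef0000, 0
        }
        doMmap(0, 4096) // interceptor installed: hook path
    }
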
  10. src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/condition_util.go

    	}
    	width := uint32(32)
    	if sf == 1 {
    		width = uint32(64)
    	}
    	if imms >= (width - 15) {
    		return (immr % 16) <= (imms - (width - 15))
    	}
    	return false
    }
    
    type sys uint8
    
    const (
    	sys_AT sys = iota
    	sys_DC
    	sys_IC
    	sys_TLBI
    	sys_SYS
    )
    
    func sys_op_4(op1, crn, crm, op2 uint32) sys {
    	sysInst := sysInstFields{uint8(op1), uint8(crn), uint8(crm), uint8(op2)}
    	return sysInst.getType()
    }
    
    - Last Modified: Mon May 16 22:24:28 UTC 2022
    - 1.4K bytes
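
    sys_op_4 above packs the four variable fields of an ARM64 SYS instruction into sysInstFields and classifies them via getType. A self-contained sketch of the same table-keyed classification; the single table entry (DC CVAC as SYS #3, C7, C10, #1) is an encoding quoted from memory of the ARM manual, not taken from the Go package:

    package main

    import "fmt"

    // fields mirrors the role of sysInstFields: the four variable fields
    // of a SYS instruction packed into one comparable key.
    type fields struct{ op1, crn, crm, op2 uint8 }

    type class uint8

    const (
        classUnknown class = iota
        classDC
        classIC
        classTLBI
    )

    // table maps specific encodings to their class; only one assumed
    // entry is shown here.
    var table = map[fields]class{
        {3, 7, 10, 1}: classDC, // DC CVAC (assumed encoding)
    }

    func classify(op1, crn, crm, op2 uint32) class {
        return table[fields{uint8(op1), uint8(crn), uint8(crm), uint8(op2)}]
    }

    func main() {
        fmt.Println(classify(3, 7, 10, 1) == classDC) // true
        fmt.Println(classify(0, 0, 0, 0) == classDC)  // false
    }
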