- Sort: Score
- Results per page: 10
- Languages: All
Results 21 - 30 of 33 for stkalloc (0.11 sec)
-
src/runtime/os_freebsd.go
throw("newosproc") } } // Version of newosproc that doesn't require a valid G. // //go:nosplit func newosproc0(stacksize uintptr, fn unsafe.Pointer) { stack := sysAlloc(stacksize, &memstats.stacks_sys) if stack == nil { writeErrStr(failallocatestack) exit(1) } // This code "knows" it's being called once from the library
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Dec 05 20:34:30 UTC 2023 - 11.6K bytes - Viewed (0) -
src/runtime/malloc_test.go
t.FailNow() } } type acLink struct { x [1 << 20]byte } var arenaCollisionSink []*acLink func TestArenaCollision(t *testing.T) { testenv.MustHaveExec(t) // Test that mheap.sysAlloc handles collisions with other // memory mappings. if os.Getenv("TEST_ARENA_COLLISION") != "1" { cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=^TestArenaCollision$", "-test.v"))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Sep 05 23:35:29 UTC 2023 - 10.6K bytes - Viewed (0) -
src/runtime/mheap.go
n := 64 * 1024 / goarch.PtrSize if n < cap(h.allspans)*3/2 { n = cap(h.allspans) * 3 / 2 } var new []*mspan sp := (*slice)(unsafe.Pointer(&new)) sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys) if sp.array == nil { throw("runtime: cannot allocate memory") } sp.len = len(h.allspans) sp.cap = n if len(h.allspans) > 0 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0) -
src/runtime/heapdump.go
nptr := size / goarch.PtrSize if uintptr(len(tmpbuf)) < nptr/8+1 { if tmpbuf != nil { sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys) } n := nptr/8 + 1 p := sysAlloc(n, &memstats.other_sys) if p == nil { throw("heapdump: out of memory") } tmpbuf = (*[1 << 30]byte)(p)[:n] } // Convert heap bitmap to pointer bitmap. clear(tmpbuf[:nptr/8+1]) s := spanOf(p)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 09 04:07:57 UTC 2024 - 17.6K bytes - Viewed (0) -
src/runtime/sys_darwin.go
} return } func osinit_hack_trampoline() // mmap is used to do low-level memory allocation via mmap. Don't allow stack // splits, since this function (used by sysAlloc) is called in a lot of low-level // parts of the runtime and callers often assume it won't acquire any locks. // //go:nosplit
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:17:26 UTC 2024 - 23.9K bytes - Viewed (0) -
src/runtime/os_linux.go
} throw("newosproc") } } // Version of newosproc that doesn't require a valid G. // //go:nosplit func newosproc0(stacksize uintptr, fn unsafe.Pointer) { stack := sysAlloc(stacksize, &memstats.stacks_sys) if stack == nil { writeErrStr(failallocatestack) exit(1) } ret := clone(cloneFlags, unsafe.Pointer(uintptr(stack)+stacksize), nil, nil, fn) if ret < 0 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 25.9K bytes - Viewed (0) -
src/runtime/mgcpacer_test.go
assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005) } }, }, { // This tests the GC pacer's response to a small change in allocation rate. name: "StepAlloc", gcPercent: 100, memoryLimit: math.MaxInt64, globalsBytes: 32 << 10, nCores: 8, allocRate: constant(33.0).sum(ramp(66.0, 1).delay(50)), scanRate: constant(1024.0),
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 19 13:53:21 UTC 2023 - 39.3K bytes - Viewed (0) -
src/runtime/mpagealloc.go
for c := chunkIndex(base); c < chunkIndex(limit); c++ { if p.chunks[c.l1()] == nil { // Create the necessary l2 entry. const l2Size = unsafe.Sizeof(*p.chunks[0]) r := sysAlloc(l2Size, p.sysStat) if r == nil { throw("pageAlloc: out of memory") } if !p.test { // Make the chunk mapping eligible or ineligible // for huge pages, depending on what our current
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 39.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/regalloc.go
} } // If a value is live at the end of the block and // isn't in a register, generate a use for the spill location. // We need to remember this information so that // the liveness analysis in stackalloc is correct. for _, e := range s.live[b.ID] { vi := &s.values[e.ID] if vi.regs != 0 { // in a register, we'll use that source for the merge. continue } if vi.rematerializeable {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 21 17:49:56 UTC 2023 - 87.2K bytes - Viewed (0) -
src/runtime/export_test.go
testSysStat.add(-int64(p.summaryMappedReady)) // Free the mapped space for chunks. for i := range p.chunks { if x := p.chunks[i]; x != nil { p.chunks[i] = nil // This memory comes from sysAlloc and will always be page-aligned. sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat) } } } // BaseChunkIdx is a convenient chunkIdx value which works on both
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:50:53 UTC 2024 - 46.1K bytes - Viewed (0)