- Sort Score
- Results per page: 10
- Languages All
Results 1 - 8 of 8 for query "asanunpoison" (0.12 sec)
-
src/runtime/asan0.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Dec 15 21:57:36 UTC 2023 - 760 bytes - Viewed (0) -
src/runtime/asan.go
} //go:noescape func doasanread(addr unsafe.Pointer, sz, sp, pc uintptr) //go:noescape func doasanwrite(addr unsafe.Pointer, sz, sp, pc uintptr) //go:noescape func asanunpoison(addr unsafe.Pointer, sz uintptr) //go:noescape func asanpoison(addr unsafe.Pointer, sz uintptr) //go:noescape func asanregisterglobals(addr unsafe.Pointer, n uintptr) // These are called from asan_GOARCH.s //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Feb 13 20:39:58 UTC 2024 - 1.6K bytes - Viewed (0) -
src/runtime/arena.go
span.elemsize -= rzSize span.largeType.Size_ = span.elemsize rzStart := span.base() + span.elemsize span.userArenaChunkFree = makeAddrRange(span.base(), rzStart) asanpoison(unsafe.Pointer(rzStart), span.limit-rzStart) asanunpoison(unsafe.Pointer(span.base()), span.elemsize) } if rate := MemProfileRate; rate > 0 { c := getMCache(mp) if c == nil {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:44:56 UTC 2024 - 37.9K bytes - Viewed (0) -
src/runtime/stack.go
traceRelease(trace) } } if raceenabled { racemalloc(v, uintptr(n)) } if msanenabled { msanmalloc(v, uintptr(n)) } if asanenabled { asanunpoison(v, uintptr(n)) } if stackDebug >= 1 { print(" allocated ", v, "\n") } return stack{uintptr(v), uintptr(v) + uintptr(n)} } // stackfree frees an n byte stack allocation at stk. //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 41.1K bytes - Viewed (0) -
src/runtime/malloc.go
// The allocated memory is larger than required userSize, it will also include // redzone and some other padding bytes. rzBeg := unsafe.Add(x, userSize) asanpoison(rzBeg, size-userSize) asanunpoison(x, userSize) } // TODO(mknyszek): We should really count the header as part // of gc_sys or something. The code below just pretends it is
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0) -
src/runtime/proc.go
racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) } if msanenabled { msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) } if asanenabled { asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) } } return gp } // Purge all cached G's from gfree list to the global list. func gfpurge(pp *p) { var ( inc int32
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 207.5K bytes - Viewed (0) -
src/runtime/mgcsweep.go
racefree(unsafe.Pointer(x), size) } if msanenabled && !s.isUserArenaChunk { msanfree(unsafe.Pointer(x), size) } if asanenabled && !s.isUserArenaChunk { asanpoison(unsafe.Pointer(x), size) } } mbits.advance() abits.advance() } } // Check for zombie objects. if s.freeindex < s.nelems { // Everything < freeindex is allocated and hence
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:52:18 UTC 2024 - 32.9K bytes - Viewed (0) -
src/runtime/mheap.go
msanfree(base, bytes) } if asanenabled { // Tell asan that this entire span is no longer in use. base := unsafe.Pointer(s.base()) bytes := s.npages << _PageShift asanpoison(base, bytes) } h.freeSpanLocked(s, spanAllocHeap) unlock(&h.lock) }) } // freeManual frees a manually-managed span returned by allocManual.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0)