Results 11 - 19 of 19 for Loaduintptr (0.19 sec)

  1. src/sync/atomic/atomic_test.go

    		after  uintptr
    	}
    	var m uint64 = magic64
    	magicptr := uintptr(m)
    	x.before = magicptr
    	x.after = magicptr
    	for delta := uintptr(1); delta+delta > delta; delta += delta {
    		k := LoadUintptr(&x.i)
    		if k != x.i {
    			t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
    		}
    		x.i += delta
    	}
    	if x.before != magicptr || x.after != magicptr {
    - Last Modified: Fri May 17 18:37:29 UTC 2024
    - 71.4K bytes
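    The excerpt above hammers LoadUintptr from the public sync/atomic package while another goroutine advances the value. A minimal, self-contained sketch of the same idea (not taken from the Go tree; the counter and goroutine structure are illustrative) could look like this:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    func main() {
        var counter uintptr
        var wg sync.WaitGroup

        // Writer: advance the counter with atomic adds.
        wg.Add(1)
        go func() {
            defer wg.Done()
            for i := 0; i < 1000; i++ {
                atomic.AddUintptr(&counter, 1)
            }
        }()

        // Reader: every load must observe a complete, untorn value.
        wg.Add(1)
        go func() {
            defer wg.Done()
            for i := 0; i < 1000; i++ {
                _ = atomic.LoadUintptr(&counter)
            }
        }()

        wg.Wait()
        fmt.Println("final:", atomic.LoadUintptr(&counter))
    }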
  2. src/runtime/race.go

    //go:linkname abigen_sync_atomic_LoadUint64 sync/atomic.LoadUint64
    func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64)
    
    //go:linkname abigen_sync_atomic_LoadUintptr sync/atomic.LoadUintptr
    func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr)
    
    //go:linkname abigen_sync_atomic_LoadPointer sync/atomic.LoadPointer
    func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
    
    - Last Modified: Fri May 17 18:37:29 UTC 2024
    - 20.4K bytes
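    These //go:linkname stubs are the race-detector entry points for the public sync/atomic loads. A small, hypothetical example of calling those public counterparts directly (ordinary sync/atomic API, not runtime internals):

    package main

    import (
        "fmt"
        "sync/atomic"
        "unsafe"
    )

    func main() {
        var u64 uint64 = 42
        var up uintptr = 7
        s := "hello"
        var p unsafe.Pointer

        atomic.StorePointer(&p, unsafe.Pointer(&s))

        fmt.Println(atomic.LoadUint64(&u64))            // 42
        fmt.Println(atomic.LoadUintptr(&up))            // 7
        fmt.Println(*(*string)(atomic.LoadPointer(&p))) // hello
    }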
  3. src/runtime/mheap.go

    func (h *mheap) allocNeedsZero(base, npage uintptr) (needZero bool) {
    	for npage > 0 {
    		ai := arenaIndex(base)
    		ha := h.arenas[ai.l1()][ai.l2()]
    
    		zeroedBase := atomic.Loaduintptr(&ha.zeroedBase)
    		arenaBase := base % heapArenaBytes
    		if arenaBase < zeroedBase {
    			// We extended into the non-zeroed part of the
    			// arena, so this region needs to be zeroed before use.
    			//
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
  4. src/runtime/stack.go

    	thisg.m.morebuf.g = 0
    
    	// NOTE: stackguard0 may change underfoot, if another thread
    	// is about to try to preempt gp. Read it just once and use that same
    	// value now and below.
    	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)
    
    	// Be conservative about where we preempt.
    	// We are interested in preempting user Go code, not runtime code.
    	// If we're holding locks, mallocing, or preemption is disabled, don't
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 41.1K bytes
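    The comment above describes the "read it just once" discipline: when a value may change underfoot, load it a single time and make every later decision against that one snapshot. A hedged user-space sketch of that pattern, with hypothetical names (limit, handle):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // limit may be updated concurrently elsewhere via atomic stores.
    var limit uintptr = 100

    func handle(request uintptr) {
        // One atomic load; reuse the local so all checks agree with each other.
        lim := atomic.LoadUintptr(&limit)
        if request > lim {
            fmt.Printf("reject: request %d exceeds limit %d\n", request, lim)
            return
        }
        fmt.Printf("accept: request %d within limit %d\n", request, lim)
    }

    func main() {
        handle(42)
        handle(250)
    }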
  5. src/runtime/malloc.go

    // persistentalloc. This must be nosplit because it is called by the
    // cgo checker code, which is called by the write barrier code.
    //
    //go:nosplit
    func inPersistentAlloc(p uintptr) bool {
    	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
    	for chunk != 0 {
    		if p >= chunk && p < chunk+persistentChunkSize {
    			return true
    		}
    		chunk = *(*uintptr)(unsafe.Pointer(chunk))
    	}
    	return false
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 59.6K bytes
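    inPersistentAlloc walks a lock-free singly linked list whose head is read with Loaduintptr. A type-safe, user-space analogue (hypothetical code, using sync/atomic.Pointer from Go 1.19+ rather than raw uintptrs) might look like:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type chunk struct {
        start, size uintptr
        next        *chunk
    }

    var head atomic.Pointer[chunk]

    // push adds a chunk at the head; existing links are never modified,
    // so readers below a loaded head see an immutable list.
    func push(c *chunk) {
        for {
            old := head.Load()
            c.next = old
            if head.CompareAndSwap(old, c) {
                return
            }
        }
    }

    // contains reports whether p falls inside any published chunk.
    func contains(p uintptr) bool {
        for c := head.Load(); c != nil; c = c.next {
            if p >= c.start && p < c.start+c.size {
                return true
            }
        }
        return false
    }

    func main() {
        push(&chunk{start: 0x1000, size: 0x100})
        push(&chunk{start: 0x2000, size: 0x100})
        fmt.Println(contains(0x1080)) // true
        fmt.Println(contains(0x3000)) // false
    }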
  6. src/runtime/proc.go

    	return allgs[:len(allgs):len(allgs)]
    }
    
    // atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.
    func atomicAllG() (**g, uintptr) {
    	length := atomic.Loaduintptr(&allglen)
    	ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
    	return ptr, length
    }
    
    // atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
    func atomicAllGIndex(ptr **g, i uintptr) *g {
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 207.5K bytes
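    atomicAllG publishes a grow-only slice through separate pointer and length atomics. Outside the runtime, a simpler (hypothetical) way to get the same consistent-snapshot property is to put an immutable slice behind a single atomic.Pointer and copy on append:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    var all atomic.Pointer[[]int]

    // appendValue copies the current snapshot, extends it, and publishes
    // the new slice; readers never see a partially written element.
    func appendValue(v int) {
        for {
            oldp := all.Load()
            var oldSlice []int
            if oldp != nil {
                oldSlice = *oldp
            }
            newSlice := make([]int, len(oldSlice)+1)
            copy(newSlice, oldSlice)
            newSlice[len(oldSlice)] = v
            if all.CompareAndSwap(oldp, &newSlice) {
                return
            }
        }
    }

    // snapshot returns a consistent view from one atomic load.
    func snapshot() []int {
        if p := all.Load(); p != nil {
            return *p
        }
        return nil
    }

    func main() {
        appendValue(1)
        appendValue(2)
        fmt.Println(snapshot()) // [1 2]
    }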
  7. src/cmd/compile/internal/ssagen/ssa.go

    	alias("internal/runtime/atomic", "Loadint64", "internal/runtime/atomic", "Load64", all...)
    	alias("internal/runtime/atomic", "Loaduintptr", "internal/runtime/atomic", "Load", p4...)
    	alias("internal/runtime/atomic", "Loaduintptr", "internal/runtime/atomic", "Load64", p8...)
    	alias("internal/runtime/atomic", "Loaduint", "internal/runtime/atomic", "Load", p4...)
    - Last Modified: Mon Jun 10 19:44:43 UTC 2024
    - 284.9K bytes
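    Two aliases exist because Loaduintptr must match the target's pointer width: it lowers to the 32-bit Load on 4-byte-pointer (p4) targets and to Load64 on 8-byte-pointer (p8) targets. A trivial check of that width:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        // Prints 64 on 8-byte-pointer platforms, 32 on 4-byte-pointer ones.
        fmt.Println(unsafe.Sizeof(uintptr(0))*8, "bit uintptr")
    }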
  8. src/runtime/mbitmap.go

    		tp.elem += tp.typ.Size_
    		tp.addr = tp.elem
    		tp.mask = readUintptr(tp.typ.GCData)
    
    		// We may have exceeded the limit after this. Bail just like next does.
    		if tp.addr >= limit {
    			return typePointers{}
    		}
    	} else {
    		// Grab the mask, but then clear any bits before the target address and any
    		// bits over the limit.
    		tp.mask = readUintptr(addb(tp.typ.GCData, (tp.addr-tp.elem)/goarch.PtrSize/8))
    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 60K bytes
  9. src/runtime/arena.go

    		// a platform-ordered way for efficiency, but stores back the
    		// data in little endian order, since we expose the bitmap through
    		// a dummy type.
    		h = h.write(s, readUintptr(addb(p, i/8)), k)
    	}
    	// Note: we call pad here to ensure we emit explicit 0 bits
    	// for the pointerless tail of the object. This ensures that
    	// there's only a single noMorePtrs mark for the next object
    - Last Modified: Wed May 08 17:44:56 UTC 2024
    - 37.9K bytes