Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 8 of 8 for Loaduintptr (0.13 sec)

  1. src/runtime/debuglog.go

    	l := getCachedDlogger()
    
    	// If we couldn't get a cached logger, try to get one from the
    	// global pool.
    	if l == nil {
    		allp := (*uintptr)(unsafe.Pointer(&allDloggers))
    		all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp)))
    		for l1 := all; l1 != nil; l1 = l1.allLink {
    			if l1.owned.Load() == 0 && l1.owned.CompareAndSwap(0, 1) {
    				l = l1
    				break
    			}
    		}
    	}
    
    	// If that failed, allocate a new logger.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Apr 02 15:10:48 UTC 2024
    - 18.3K bytes
    - Viewed (0)
  2. src/sync/atomic/doc.go

    // (particularly if you target 32-bit platforms; see the bugs section).
    func LoadUint64(addr *uint64) (val uint64)
    
    // LoadUintptr atomically loads *addr.
    // Consider using the more ergonomic and less error-prone [Uintptr.Load] instead.
    func LoadUintptr(addr *uintptr) (val uintptr)
    
    // LoadPointer atomically loads *addr.
    // Consider using the more ergonomic and less error-prone [Pointer.Load] instead.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Jun 07 21:14:51 UTC 2024
    - 11.7K bytes
    - Viewed (0)
  3. src/runtime/signal_unix.go

    // handle a particular signal (e.g., signal occurred on a non-Go thread).
    // See sigfwdgo for more information on when the signals are forwarded.
    //
    // This is read by the signal handler; accesses should use
    // atomic.Loaduintptr and atomic.Storeuintptr.
    var fwdSig [_NSIG]uintptr
    
    // handlingSig is indexed by signal number and is non-zero if we are
    // currently handling the signal. Or, to put it another way, whether
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 10 16:04:54 UTC 2024
    - 45K bytes
    - Viewed (0)
  4. src/cmd/compile/internal/test/inl_test.go

    		// internal/runtime/atomic.Loaduintptr is only intrinsified on these platforms.
    		want["runtime"] = append(want["runtime"], "traceAcquire")
    	}
    	if bits.UintSize == 64 {
    		// mix is only defined on 64-bit architectures
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Apr 09 04:07:57 UTC 2024
    - 10.7K bytes
    - Viewed (0)
  5. src/internal/runtime/atomic/types.go

    type Uintptr struct {
    	noCopy noCopy
    	value  uintptr
    }
    
    // Load accesses and returns the value atomically.
    //
    //go:nosplit
    func (u *Uintptr) Load() uintptr {
    	return Loaduintptr(&u.value)
    }
    
    // LoadAcquire is a partially unsynchronized version
    // of Load that relaxes ordering constraints. Other threads
    // may observe operations that precede this operation to
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 14.2K bytes
    - Viewed (0)
  6. src/runtime/race.go

    //go:linkname abigen_sync_atomic_LoadUint64 sync/atomic.LoadUint64
    func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64)
    
    //go:linkname abigen_sync_atomic_LoadUintptr sync/atomic.LoadUintptr
    func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr)
    
    //go:linkname abigen_sync_atomic_LoadPointer sync/atomic.LoadPointer
    func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 18:37:29 UTC 2024
    - 20.4K bytes
    - Viewed (0)
  7. src/runtime/stack.go

    	thisg.m.morebuf.g = 0
    
    	// NOTE: stackguard0 may change underfoot, if another thread
    	// is about to try to preempt gp. Read it just once and use that same
    	// value now and below.
    	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)
    
    	// Be conservative about where we preempt.
    	// We are interested in preempting user Go code, not runtime code.
    	// If we're holding locks, mallocing, or preemption is disabled, don't
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 41.1K bytes
    - Viewed (0)
  8. src/runtime/arena.go

    		// a platform-ordered way for efficiency, but stores back the
    		// data in little endian order, since we expose the bitmap through
    		// a dummy type.
    		h = h.write(s, readUintptr(addb(p, i/8)), k)
    	}
    	// Note: we call pad here to ensure we emit explicit 0 bits
    	// for the pointerless tail of the object. This ensures that
    	// there's only a single noMorePtrs mark for the next object
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:44:56 UTC 2024
    - 37.9K bytes
    - Viewed (0)
Back to top