Results 1 - 10 of 758 for ATOMIC (0.12 sec)

  1. android/guava/src/com/google/common/util/concurrent/AtomicLongMap.java

        outer:
        while (true) {
          AtomicLong atomic = map.get(key);
          if (atomic == null) {
            atomic = map.putIfAbsent(key, new AtomicLong(delta));
            if (atomic == null) {
              return delta;
            }
            // atomic is now non-null; fall through
          }
    
          while (true) {
            long oldValue = atomic.get();
            if (oldValue == 0L) {
    Registered: Wed Jun 12 16:38:11 UTC 2024
    - Last Modified: Mon Apr 01 16:15:01 UTC 2024
    - 14.1K bytes
    - Viewed (0)
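
The Guava snippet above is the usual two-level pattern for a concurrent counter map: putIfAbsent installs a fresh counter, then an inner compare-and-swap loop updates it. A minimal sketch of the same idea in Go, using only sync.Map and sync/atomic from the standard library (the addAndGet helper is mine, not Guava's API, and the zero-value cleanup hinted at by the oldValue == 0L check is omitted):

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    // addAndGet adds delta to the counter stored under key, creating the
    // counter if it is absent, and returns the new value.
    func addAndGet(m *sync.Map, key string, delta int64) int64 {
        v, _ := m.LoadOrStore(key, new(atomic.Int64)) // install a zero counter if missing
        return v.(*atomic.Int64).Add(delta)           // atomic add on the shared counter
    }

    func main() {
        var m sync.Map
        var wg sync.WaitGroup
        for i := 0; i < 100; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                addAndGet(&m, "hits", 1)
            }()
        }
        wg.Wait()
        fmt.Println(addAndGet(&m, "hits", 0)) // 100
    }
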
  2. src/runtime/atomic_pointer.go

    // to be able to intercept the sync/atomic forms but not the runtime forms.
    
    //go:linkname sync_atomic_StoreUintptr sync/atomic.StoreUintptr
    func sync_atomic_StoreUintptr(ptr *uintptr, new uintptr)
    
    //go:linkname sync_atomic_StorePointer sync/atomic.StorePointer
    //go:nosplit
    func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) {
    	if writeBarrier.enabled {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 16:25:21 UTC 2024
    - 4K bytes
    - Viewed (0)
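
atomic_pointer.go forwards the sync/atomic pointer stores into the runtime via go:linkname so that the garbage collector's write barrier is applied. User code only ever touches the sync/atomic side; here is a small sketch of publishing a value through an atomic pointer, assuming Go 1.19+ for the generic atomic.Pointer type:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type config struct{ addr string }

    // current holds the latest config; the zero value is ready to use.
    var current atomic.Pointer[config]

    func main() {
        // Store goes through the runtime plumbing shown above, so the
        // write barrier sees the pointer write.
        current.Store(&config{addr: "10.0.0.1:443"})

        // Readers load the pointer atomically and never observe a torn value.
        if c := current.Load(); c != nil {
            fmt.Println(c.addr)
        }
    }
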
  3. src/internal/runtime/atomic/bench_test.go

    	x = 1
    	ptr := &x
    	b.RunParallel(func(pb *testing.PB) {
    		for pb.Next() {
    			atomic.Cas(ptr, 1, 0)
    			atomic.Cas(ptr, 0, 1)
    		}
    	})
    }
    
    func BenchmarkCas64(b *testing.B) {
    	var x uint64
    	x = 1
    	ptr := &x
    	b.RunParallel(func(pb *testing.PB) {
    		for pb.Next() {
    			atomic.Cas64(ptr, 1, 0)
    			atomic.Cas64(ptr, 0, 1)
    		}
    	})
    }
    func BenchmarkXchg(b *testing.B) {
    	var x uint32
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 3.2K bytes
    - Viewed (0)
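
internal/runtime/atomic is not importable outside the standard library, so an equivalent benchmark in ordinary code would go through sync/atomic's compare-and-swap. A rough sketch, with the benchmark name and package invented for illustration:

    package atomicbench

    import (
        "sync/atomic"
        "testing"
    )

    // BenchmarkCompareAndSwap mirrors BenchmarkCas above: many goroutines
    // flip a word between 1 and 0 through the public sync/atomic API.
    func BenchmarkCompareAndSwap(b *testing.B) {
        var x uint32 = 1
        b.RunParallel(func(pb *testing.PB) {
            for pb.Next() {
                atomic.CompareAndSwapUint32(&x, 1, 0)
                atomic.CompareAndSwapUint32(&x, 0, 1)
            }
        })
    }
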
  4. src/runtime/lock_futex.go

    }
    
    func mutexContended(l *mutex) bool {
    	return atomic.Load(key32(&l.key)) > mutex_locked
    }
    
    func lock(l *mutex) {
    	lockWithRank(l, getLockRank(l))
    }
    
    func lock2(l *mutex) {
    	gp := getg()
    
    	if gp.m.locks < 0 {
    		throw("runtime·lock: lock count")
    	}
    	gp.m.locks++
    
    	// Speculative grab for lock.
    	v := atomic.Xchg(key32(&l.key), mutex_locked)
    	if v == mutex_unlocked {
    		return
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:34 UTC 2024
    - 5.4K bytes
    - Viewed (0)
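
The "speculative grab" in lock2 is a bare atomic exchange: swap in the locked value and, if the old value was unlocked, the caller owns the mutex. Below is a toy spinlock built on the public equivalents, where atomic.SwapUint32 plays the role of the runtime's Xchg; it is only a sketch, since the real lock2 parks on a futex instead of spinning:

    package main

    import (
        "fmt"
        "runtime"
        "sync"
        "sync/atomic"
    )

    const (
        unlocked uint32 = 0
        locked   uint32 = 1
    )

    type spinlock struct{ key uint32 }

    func (l *spinlock) lock() {
        // Speculative grab, as in lock2: exchange in "locked" and check what was there.
        for atomic.SwapUint32(&l.key, locked) != unlocked {
            runtime.Gosched() // the runtime would sleep on the futex here
        }
    }

    func (l *spinlock) unlock() {
        atomic.StoreUint32(&l.key, unlocked)
    }

    func main() {
        var l spinlock
        var n int
        var wg sync.WaitGroup
        for i := 0; i < 100; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                l.lock()
                n++ // protected by the spinlock
                l.unlock()
            }()
        }
        wg.Wait()
        fmt.Println(n) // 100
    }
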
  5. src/cmd/go/testdata/script/cover_sync_atomic_import.txt

    go test -short -cover -covermode=atomic -coverpkg=coverdep/p1 coverdep
    
    # In addition to the above, test to make sure there is no funny
    # business if we try "go test -cover" in atomic mode targeting
    # sync/atomic itself (see #57445). Just a short test run is needed
    # since we're mainly interested in making sure the test builds and can
    # execute at least one test.
    
    go test -short -covermode=atomic -run=TestStoreInt64 sync/atomic
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Mar 26 00:18:30 UTC 2024
    - 1011 bytes
    - Viewed (0)
  6. src/internal/runtime/atomic/atomic_test.go

    	p64 := (*int64)(u)   // misaligned
    
    	shouldPanic(t, "Load64", func() { atomic.Load64(up64) })
    	shouldPanic(t, "Loadint64", func() { atomic.Loadint64(p64) })
    	shouldPanic(t, "Store64", func() { atomic.Store64(up64, 0) })
    	shouldPanic(t, "Xadd64", func() { atomic.Xadd64(up64, 1) })
    	shouldPanic(t, "Xchg64", func() { atomic.Xchg64(up64, 1) })
    	shouldPanic(t, "Cas64", func() { atomic.Cas64(up64, 1, 2) })
    }
    
    func TestAnd8(t *testing.T) {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 8.5K bytes
    - Viewed (0)
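
The test above verifies that misaligned 64-bit operands make the runtime's atomics panic on 32-bit platforms. The public function-style API (atomic.AddUint64 and friends) carries the same alignment requirement, which is one reason to prefer the typed wrappers from Go 1.19+: atomic.Int64 and atomic.Uint64 are guaranteed 64-bit aligned even inside structs on 32-bit systems. A small sketch of the safer style:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type counters struct {
        flag byte
        // A plain uint64 placed after the byte field may end up only 4-byte
        // aligned on 32-bit platforms, and atomic.AddUint64 on it could panic
        // there. The typed wrapper is always safely aligned.
        hits atomic.Uint64
    }

    func main() {
        var c counters
        c.hits.Add(1)
        fmt.Println(c.hits.Load()) // 1
    }
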
  7. cni/pkg/nodeagent/healthServer.go

    		_ = http.ListenAndServe(":"+constants.ReadinessPort, router)
    	}()
    
    	return
    }
    
    func initRouter(router *http.ServeMux) (installReady *atomic.Value, watchReady *atomic.Value) {
    	installReady = &atomic.Value{}
    	watchReady = &atomic.Value{}
    	installReady.Store(false)
    	watchReady.Store(false)
    
    	router.HandleFunc(constants.LivenessEndpoint, healthz)
    	router.HandleFunc(constants.ReadinessEndpoint, readyz(installReady, watchReady))
    
    	return
    Registered: Fri Jun 14 15:00:06 UTC 2024
    - Last Modified: Mon Apr 15 01:29:35 UTC 2024
    - 1.8K bytes
    - Viewed (0)
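
The Istio health server above keeps two atomic.Value flags and hands them to the readiness handler. A stripped-down sketch of that pattern using only the standard library; the paths and port below are placeholders, not Istio's constants:

    package main

    import (
        "net/http"
        "sync/atomic"
    )

    func readyz(installReady, watchReady *atomic.Value) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            // Report ready only once both flags have been flipped to true.
            if installReady.Load() != true || watchReady.Load() != true {
                http.Error(w, "not ready", http.StatusServiceUnavailable)
                return
            }
            w.WriteHeader(http.StatusOK)
        }
    }

    func main() {
        installReady, watchReady := &atomic.Value{}, &atomic.Value{}
        installReady.Store(false)
        watchReady.Store(false)

        mux := http.NewServeMux()
        mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
            w.WriteHeader(http.StatusOK)
        })
        mux.HandleFunc("/readyz", readyz(installReady, watchReady))

        // Elsewhere the agent would call installReady.Store(true) and
        // watchReady.Store(true) once its setup finishes.
        _ = http.ListenAndServe(":8000", mux)
    }
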
  8. src/internal/runtime/atomic/atomic_andor_test.go

    // implementations for all architectures.
    package atomic_test
    
    import (
    	"internal/runtime/atomic"
    	"testing"
    )
    
    func TestAnd32(t *testing.T) {
    	// Basic sanity check.
    	x := uint32(0xffffffff)
    	for i := uint32(0); i < 32; i++ {
    		old := x
    		v := atomic.And32(&x, ^(1 << i))
    		if r := uint32(0xffffffff) << (i + 1); x != r || v != old {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sat Apr 27 20:49:32 UTC 2024
    - 5.2K bytes
    - Viewed (0)
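
TestAnd32 checks that atomic.And32 clears the requested bits and returns the previous value. Outside the runtime the same operation can be assembled from a compare-and-swap loop; recent Go releases also expose And/Or in sync/atomic directly, but the sketch below only assumes CompareAndSwapUint32:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // and32 atomically performs *addr &= mask and returns the old value,
    // matching the contract exercised by TestAnd32 above.
    func and32(addr *uint32, mask uint32) uint32 {
        for {
            old := atomic.LoadUint32(addr)
            if atomic.CompareAndSwapUint32(addr, old, old&mask) {
                return old
            }
        }
    }

    func main() {
        x := uint32(0xffffffff)
        for i := uint32(0); i < 32; i++ {
            old := and32(&x, ^(uint32(1) << i)) // clear bit i
            fmt.Printf("cleared bit %2d: old=%#x new=%#x\n", i, old, x)
        }
    }
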
  9. src/runtime/vdso_freebsd_x86.go

    //go:nosplit
    func (th *vdsoTimehands) getHPETTimecounter() (uint32, bool) {
    	idx := int(th.x86_hpet_idx)
    	if idx >= len(hpetDevMap) {
    		return 0, false
    	}
    
    	p := atomic.Loaduintptr(&hpetDevMap[idx])
    	if p == 0 {
    		systemstack(func() { initHPETTimecounter(idx) })
    		p = atomic.Loaduintptr(&hpetDevMap[idx])
    	}
    	if p == ^uintptr(0) {
    		return 0, false
    	}
    	return *(*uint32)(unsafe.Pointer(p + _HPET_MAIN_COUNTER)), true
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 1.9K bytes
    - Viewed (0)
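
getHPETTimecounter lazily initializes the device mapping: load the cached word, initialize if it is still zero, re-load, and treat the all-ones sentinel as a remembered failure. Here is a generic analogue of that double-checked shape with the typed sync/atomic wrappers; the lazy type and its init hook are inventions for the sketch, and the runtime additionally has to run its initialization on the system stack:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    // lazy caches a lazily created *T. A nil load triggers one initialization
    // attempt, and a failed attempt is remembered instead of retried forever,
    // much like the ^uintptr(0) sentinel above.
    type lazy[T any] struct {
        p      atomic.Pointer[T]
        failed atomic.Bool
        mu     sync.Mutex
        init   func() (*T, error)
    }

    func (l *lazy[T]) get() (*T, bool) {
        if v := l.p.Load(); v != nil { // fast path, like the first Loaduintptr
            return v, true
        }
        if l.failed.Load() {
            return nil, false
        }
        l.mu.Lock()
        defer l.mu.Unlock()
        if v := l.p.Load(); v != nil { // re-check after winning the lock
            return v, true
        }
        v, err := l.init()
        if err != nil {
            l.failed.Store(true)
            return nil, false
        }
        l.p.Store(v)
        return v, true
    }

    func main() {
        l := lazy[string]{init: func() (*string, error) { s := "mapped"; return &s, nil }}
        if v, ok := l.get(); ok {
            fmt.Println(*v)
        }
    }
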
  10. src/runtime/lock_sema.go

    //go:nowritebarrier
    func unlock2(l *mutex) {
    	gp := getg()
    	var mp *m
    	for {
    		v := atomic.Loaduintptr(&l.key)
    		if v == locked {
    			if atomic.Casuintptr(&l.key, locked, 0) {
    				break
    			}
    		} else {
    			// Other M's are waiting for the lock.
    			// Dequeue an M.
    			mp = muintptr(v &^ locked).ptr()
    			if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
    				// Dequeued an M.  Wake it.
    				semawakeup(mp)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 6.8K bytes
    - Viewed (0)
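
unlock2 is a load-then-CAS retry loop: read the key word, decide whether it only says "locked" or also encodes a waiter list, and retry the compare-and-swap until it lands, waking the dequeued M on success. The same retry shape shows up in any lock-free pop; below is a Treiber-style stack sketch using the typed atomic.Pointer, offered purely as an analogy to the waiter-dequeue loop, not as the runtime's mechanism:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type node struct {
        value int
        next  *node
    }

    type stack struct{ head atomic.Pointer[node] }

    func (s *stack) push(v int) {
        n := &node{value: v}
        for {
            old := s.head.Load()
            n.next = old
            if s.head.CompareAndSwap(old, n) { // retry until the CAS lands
                return
            }
        }
    }

    func (s *stack) pop() (int, bool) {
        for {
            old := s.head.Load()
            if old == nil {
                return 0, false
            }
            // Dequeue the head node, the way unlock2 dequeues a waiting M.
            if s.head.CompareAndSwap(old, old.next) {
                return old.value, true
            }
        }
    }

    func main() {
        var s stack
        s.push(1)
        s.push(2)
        fmt.Println(s.pop()) // 2 true
    }
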