Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 71 - 80 of 92 for Loadint32 (0.22 sec)

  1. internal/s3select/simdj/reader.go

    type safeCloser struct {
    	closed uint32
    	r      io.Reader
    }
    
    func (s *safeCloser) Read(p []byte) (n int, err error) {
    	if atomic.LoadUint32(&s.closed) == 1 {
    		return 0, io.EOF
    	}
    	n, err = s.r.Read(p)
    	if atomic.LoadUint32(&s.closed) == 1 {
    		return 0, io.EOF
    	}
    	return n, err
    }
    
    func (s *safeCloser) Close() error {
    	atomic.CompareAndSwapUint32(&s.closed, 0, 1)
    Registered: Sun Jun 16 00:44:34 UTC 2024
    - Last Modified: Tue May 30 17:02:22 UTC 2023
    - 4.9K bytes
    - Viewed (0)
  2. src/runtime/debug_test.go

    	}
    	close(done)
    }
    
    // Don't inline this function, since we want to test adjusting
    // pointers in the arguments.
    //
    //go:noinline
    func debugCallWorker2(stop *uint32, x *int) {
    	// Spin until another goroutine stores a nonzero value through
    	// *stop; the flag is polled atomically each iteration.
    	for atomic.LoadUint32(stop) == 0 {
    		// Strongly encourage x to live in a register so we
    		// can test pointer register adjustment.
    		*x++
    	}
    	// Leave a known sentinel so the caller can verify the worker
    	// observed the stop signal and exited this loop.
    	*x = 1
    }
    
    func debugCallTKill(tid int) error {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Sep 08 15:08:04 UTC 2023
    - 8K bytes
    - Viewed (0)
  3. src/runtime/runtime_unix_test.go

    	var stop uint32
    	defer atomic.StoreUint32(&stop, 1) // in case of panic
    
    	var wg sync.WaitGroup
    	for i := 0; i < 4; i++ {
    		wg.Add(1)
    		go func() {
    			for atomic.LoadUint32(&stop) == 0 {
    				syscall.Close(-1)
    			}
    			wg.Done()
    		}()
    	}
    
    	max := 10000
    	if testing.Short() {
    		max = 100
    	}
    	stk := make([]runtime.StackRecord, 128)
    	for n := 0; n < max; n++ {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Oct 28 18:17:57 UTC 2021
    - 1.2K bytes
    - Viewed (0)
  4. src/cmd/compile/internal/ssagen/ssa.go

    	// Note: these are disabled by flag_race in findIntrinsic below.
    	alias("sync/atomic", "LoadInt32", "internal/runtime/atomic", "Load", all...)
    	alias("sync/atomic", "LoadInt64", "internal/runtime/atomic", "Load64", all...)
    	alias("sync/atomic", "LoadPointer", "internal/runtime/atomic", "Loadp", all...)
    	alias("sync/atomic", "LoadUint32", "internal/runtime/atomic", "Load", all...)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Jun 10 19:44:43 UTC 2024
    - 284.9K bytes
    - Viewed (0)
  5. src/runtime/testdata/testprogcgo/lockosthread.go

    		// Check that this goroutine is running on a different thread.
    		self := C.pthread_self()
    		if C.pthread_equal(subThread, self) != 0 {
    			println("locked thread reused")
    			os.Exit(1)
    		}
    		if atomic.LoadUint32((*uint32)(&C.threadExited)) != 0 {
    			println("OK")
    			return
    		}
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Feb 02 20:21:33 UTC 2023
    - 2.5K bytes
    - Viewed (0)
  6. src/runtime/race/race_linux_test.go

    	if err != nil {
    		t.Fatalf("mprotect high failed %s\n", err)
    	}
    
    	// This should not fault.
    	a := (*uint32)(unsafe.Pointer(&b[pagesize-4]))
    	atomic.StoreUint32(a, 1)
    	if x := atomic.LoadUint32(a); x != 1 {
    		t.Fatalf("bad atomic value: %v, want 1", x)
    	}
    	if x := atomic.AddUint32(a, 1); x != 2 {
    		t.Fatalf("bad atomic value: %v, want 2", x)
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Jun 16 14:09:02 UTC 2023
    - 1.9K bytes
    - Viewed (0)
  7. pkg/volume/metrics_cached.go

    // error
    // cacheOnce mirrors sync.Once, except the done flag is only set after
    // the guarded function returns without error, so failed attempts are
    // retried on later calls.
    type cacheOnce struct {
    	m    sync.Mutex // serializes the slow path
    	done uint32     // set to 1 (atomically) once f has succeeded
    }
    
    // Copied from sync.Once but we don't want to cache the results if there is an
    // error
    func (o *cacheOnce) cache(f func() error) {
    	if atomic.LoadUint32(&o.done) == 1 {
    		return
    	}
    	// Slow-path.
    	o.m.Lock()
    	defer o.m.Unlock()
    	if o.done == 0 {
    		err := f()
    		if err == nil {
    			atomic.StoreUint32(&o.done, 1)
    		}
    	}
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Fri Sep 16 11:12:06 UTC 2022
    - 1.9K bytes
    - Viewed (0)
  8. pkg/lazy/lazy.go

    // NewWithRetry returns a Lazy[T] that computes its value with f; the
    // retry flag makes the computation re-run on subsequent Gets after a
    // failure, until f stops returning a non-nil error.
    func NewWithRetry[T any](f func() (T, error)) Lazy[T] {
    	return &lazyImpl[T]{getter: f, retry: true}
    }
    
    // Get returns the lazily computed value and its error. The fast path
    // consults the atomic done flag; first callers fall through to the
    // mutex-guarded slow path.
    func (l *lazyImpl[T]) Get() (T, error) {
    	if atomic.LoadUint32(&l.done) != 0 {
    		// Result already published by the slow path.
    		return l.res, l.err
    	}
    	// Outlined slow-path to allow inlining of the fast-path.
    	return l.doSlow()
    }
    
    func (l *lazyImpl[T]) doSlow() (T, error) {
    	l.m.Lock()
    	defer l.m.Unlock()
    Registered: Fri Jun 14 15:00:06 UTC 2024
    - Last Modified: Wed Aug 17 22:54:10 UTC 2022
    - 2.2K bytes
    - Viewed (0)
  9. internal/grid/connection.go

    	if debugPrint {
    		fmt.Println(c.Local, "->", c.Remote, "WaitForConnect")
    		defer fmt.Println(c.Local, "->", c.Remote, "WaitForConnect done")
    	}
    	c.connChange.L.Lock()
    	if atomic.LoadUint32((*uint32)(&c.state)) == StateConnected {
    		c.connChange.L.Unlock()
    		// Happy path.
    		return nil
    	}
    	ctx, cancel := context.WithCancel(ctx)
    	defer cancel()
    	changed := make(chan State, 1)
    	go func() {
    Registered: Sun Jun 16 00:44:34 UTC 2024
    - Last Modified: Fri Jun 07 15:51:52 UTC 2024
    - 44.8K bytes
    - Viewed (0)
  10. pkg/test/echo/server/instance.go

    		Dialer:        s.Dialer,
    		ListenerIP:    listenerIP,
    		DisableALPN:   s.DisableALPN,
    		IstioVersion:  s.IstioVersion,
    	})
    }
    
    // isReady reports whether the instance's ready flag has been set.
    // The flag is read atomically; the value 1 means ready.
    func (s *Instance) isReady() bool {
    	ready := atomic.LoadUint32(&s.ready)
    	return ready == 1
    }
    
    func (s *Instance) waitUntilReady() error {
    	wg := &sync.WaitGroup{}
    
    	onEndpointReady := func() {
    		wg.Done()
    	}
    
    	// Start the servers, updating port numbers as necessary.
    Registered: Fri Jun 14 15:00:06 UTC 2024
    - Last Modified: Fri Jun 09 07:54:01 UTC 2023
    - 7.1K bytes
    - Viewed (0)
Back to top