- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 60 for addUint32 (0.17 sec)
-
src/testing/benchmark_test.go
if testing.Short() { t.Skip("skipping in short mode") } testing.Benchmark(func(b *testing.B) { procs := uint32(0) iters := uint64(0) b.SetParallelism(3) b.RunParallel(func(pb *testing.PB) { atomic.AddUint32(&procs, 1) for pb.Next() { atomic.AddUint64(&iters, 1) } }) if want := uint32(3 * runtime.GOMAXPROCS(0)); procs != want { t.Errorf("got %v procs, want %v", procs, want) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 5.6K bytes - Viewed (0) -
internal/http/server.go
w.Header().Set(RetryAfter, "60") w.WriteHeader(http.StatusServiceUnavailable) w.Write([]byte(http.ErrServerClosed.Error())) return } atomic.AddInt32(&srv.requestCount, 1) defer atomic.AddInt32(&srv.requestCount, -1) // Handle request using passed handler. handler.ServeHTTP(w, r) }) srv.listenerMutex.Lock() srv.Handler = wrappedHandler srv.listener = listener
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Fri Feb 09 21:25:16 UTC 2024 - 7.7K bytes - Viewed (0) -
src/runtime/race/race_test.go
) var wg sync.WaitGroup wg.Add(G) for g := 0; g < G; g++ { go func() { defer wg.Done() hold := make([][]uint32, H) for i := 0; i < b.N; i++ { a := make([]uint32, S) atomic.AddUint32(&a[rand.Intn(len(a))], 1) hold[rand.Intn(len(hold))] = a } _ = hold }() } wg.Wait() } func BenchmarkStackLeak(b *testing.B) { done := make(chan bool, 1)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Dec 13 18:45:54 UTC 2021 - 6K bytes - Viewed (0) -
src/sync/atomic/type.go
// Add atomically adds delta to x and returns the new value. func (x *Uint32) Add(delta uint32) (new uint32) { return AddUint32(&x.v, delta) } // And atomically performs a bitwise AND operation on x using the bitmask // provided as mask and returns the old value. func (x *Uint32) And(mask uint32) (old uint32) { return AndUint32(&x.v, mask) } // Or atomically performs a bitwise OR operation on x using the bitmask
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:37:29 UTC 2024 - 8.5K bytes - Viewed (0) -
src/sync/pool_test.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 8K bytes - Viewed (0) -
src/cmd/link/internal/loong64/asm.go
"debug/elf" "log" ) func gentext(ctxt *ld.Link, ldr *loader.Loader) { initfunc, addmoduledata := ld.PrepareAddmoduledata(ctxt) if initfunc == nil { return } o := func(op uint32) { initfunc.AddUint32(ctxt.Arch, op) } // Emit the following function: // // local.dso_init: // la.pcrel $a0, local.moduledata // b runtime.addmoduledata // 0000000000000000 <local.dso_init>:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Feb 27 17:26:07 UTC 2024 - 7.5K bytes - Viewed (0) -
src/sync/map_test.go
// Set finalizers that count for collected keys. A non-zero count // indicates that keys have not been leaked. for atomic.LoadUint32(&finalized) == 0 { p := new(int) runtime.SetFinalizer(p, func(*int) { atomic.AddUint32(&finalized, 1) }) m.Store(p, struct{}{}) m.Delete(p) runtime.GC() } } func TestMapRangeNestedCall(t *testing.T) { // Issue 46399 var m sync.Map
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Feb 01 15:34:22 UTC 2024 - 8.1K bytes - Viewed (0) -
test/fixedbugs/issue24449.go
// adds and then re-use the flag value to see if // the atomic add has clobbered them. atomic.AddInt32(&cnt32, 1) if len(a) == len(b) { atomic.AddInt32(&cnt32, 2) } atomic.AddInt32(&cnt32, 4) if len(a) >= len(b) { atomic.AddInt32(&cnt32, 8) } if len(a) <= len(b) { atomic.AddInt32(&cnt32, 16) } return atomic.LoadInt32(&cnt32) == 31 } var cnt64 int64 //go:noinline
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 20 09:44:50 UTC 2018 - 1.2K bytes - Viewed (0) -
src/runtime/rwmutex_test.go
func reader(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) { for i := 0; i < num_iterations; i++ { rwm.RLock() n := atomic.AddInt32(activity, 1) if n < 1 || n >= 10000 { panic(fmt.Sprintf("wlock(%d)\n", n)) } for i := 0; i < 100; i++ { } atomic.AddInt32(activity, -1) rwm.RUnlock() } cdone <- true } func writer(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Dec 15 22:00:45 UTC 2023 - 4.2K bytes - Viewed (0) -
internal/pubsub/pubsub.go
func (ps *PubSub[T, M]) Subscribe(mask M, subCh chan T, doneCh <-chan struct{}, filter func(entry T) bool) error { totalSubs := atomic.AddInt32(&ps.numSubscribers, 1) if ps.maxSubscribers > 0 && totalSubs > ps.maxSubscribers { atomic.AddInt32(&ps.numSubscribers, -1) return fmt.Errorf("the limit of `%d` subscribers is reached", ps.maxSubscribers) } ps.Lock() defer ps.Unlock()
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Tue Feb 06 16:57:30 UTC 2024 - 5.2K bytes - Viewed (0)