- Sort Score
- Results per page: 10
- Languages All
Results 61 - 70 of 85 for LoadInt32 (0.14 sec)
-
pkg/util/iptables/monitor_test.go
mfe.tables["nat"].Len() == 0 } func waitForReloads(reloads *uint32, expected uint32) error { if atomic.LoadUint32(reloads) < expected { utilwait.PollImmediate(100*time.Millisecond, time.Second, func() (bool, error) { return atomic.LoadUint32(reloads) >= expected, nil }) } got := atomic.LoadUint32(reloads) if got != expected { return fmt.Errorf("expected %d, got %d", expected, got) } return nil }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Nov 08 15:21:59 UTC 2023 - 9.3K bytes - Viewed (0) -
internal/config/lambda/target/lazyinit.go
type lazyInit struct { done uint32 m sync.Mutex } func (l *lazyInit) Do(f func() error) error { if atomic.LoadUint32(&l.done) == 0 { return l.doSlow(f) } return nil } func (l *lazyInit) doSlow(f func() error) error { l.m.Lock() defer l.m.Unlock() if atomic.LoadUint32(&l.done) == 0 { if err := f(); err != nil { return err } // Mark as done only when f() is successful
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Tue Mar 07 16:12:41 UTC 2023 - 1.3K bytes - Viewed (0) -
src/runtime/testdata/testprog/preempt.go
}() // Also test empty infinite loop. go func() { atomic.AddUint32(&ready2, 1) for { } }() // Wait for the goroutine to stop passing through sync // safe-points. for atomic.LoadUint32(&ready) == 0 || atomic.LoadUint32(&ready2) < 2 { runtime.Gosched() } // Run a GC, which will have to stop the goroutine for STW and // for stack scanning. If this doesn't work, the test will // deadlock and timeout.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Dec 07 17:46:04 UTC 2021 - 1.6K bytes - Viewed (0) -
src/testing/cover.go
// 'go tool cover'. func Coverage() float64 { if goexperiment.CoverageRedesign { return coverage2() } var n, d int64 for _, counters := range cover.Counters { for i := range counters { if atomic.LoadUint32(&counters[i]) > 0 { n++ } d++ } } if d == 0 { return 0 } return float64(n) / float64(d) } // RegisterCover records the coverage data accumulators for the tests.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 23 11:37:31 UTC 2023 - 3.4K bytes - Viewed (0) -
internal/s3select/simdj/reader.go
type safeCloser struct { closed uint32 r io.Reader } func (s *safeCloser) Read(p []byte) (n int, err error) { if atomic.LoadUint32(&s.closed) == 1 { return 0, io.EOF } n, err = s.r.Read(p) if atomic.LoadUint32(&s.closed) == 1 { return 0, io.EOF } return n, err } func (s *safeCloser) Close() error { atomic.CompareAndSwapUint32(&s.closed, 0, 1)
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Tue May 30 17:02:22 UTC 2023 - 4.9K bytes - Viewed (0) -
src/runtime/debug_test.go
} close(done) } // Don't inline this function, since we want to test adjusting // pointers in the arguments. // //go:noinline func debugCallWorker2(stop *uint32, x *int) { for atomic.LoadUint32(stop) == 0 { // Strongly encourage x to live in a register so we // can test pointer register adjustment. *x++ } *x = 1 } func debugCallTKill(tid int) error {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 08 15:08:04 UTC 2023 - 8K bytes - Viewed (0) -
src/runtime/runtime_unix_test.go
var stop uint32 defer atomic.StoreUint32(&stop, 1) // in case of panic var wg sync.WaitGroup for i := 0; i < 4; i++ { wg.Add(1) go func() { for atomic.LoadUint32(&stop) == 0 { syscall.Close(-1) } wg.Done() }() } max := 10000 if testing.Short() { max = 100 } stk := make([]runtime.StackRecord, 128) for n := 0; n < max; n++ {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 28 18:17:57 UTC 2021 - 1.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssagen/ssa.go
// Note: these are disabled by flag_race in findIntrinsic below. alias("sync/atomic", "LoadInt32", "internal/runtime/atomic", "Load", all...) alias("sync/atomic", "LoadInt64", "internal/runtime/atomic", "Load64", all...) alias("sync/atomic", "LoadPointer", "internal/runtime/atomic", "Loadp", all...) alias("sync/atomic", "LoadUint32", "internal/runtime/atomic", "Load", all...)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jun 10 19:44:43 UTC 2024 - 284.9K bytes - Viewed (0) -
src/runtime/testdata/testprogcgo/lockosthread.go
// Check that this goroutine is running on a different thread. self := C.pthread_self() if C.pthread_equal(subThread, self) != 0 { println("locked thread reused") os.Exit(1) } if atomic.LoadUint32((*uint32)(&C.threadExited)) != 0 { println("OK") return } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Feb 02 20:21:33 UTC 2023 - 2.5K bytes - Viewed (0) -
src/runtime/race/race_linux_test.go
if err != nil { t.Fatalf("mprotect high failed %s\n", err) } // This should not fault. a := (*uint32)(unsafe.Pointer(&b[pagesize-4])) atomic.StoreUint32(a, 1) if x := atomic.LoadUint32(a); x != 1 { t.Fatalf("bad atomic value: %v, want 1", x) } if x := atomic.AddUint32(a, 1); x != 2 { t.Fatalf("bad atomic value: %v, want 2", x) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 16 14:09:02 UTC 2023 - 1.9K bytes - Viewed (0)