- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 87 for race (0.06 sec)
-
src/fmt/fmt_test.go
switch { case testing.Short(): t.Skip("skipping malloc count in short mode") case runtime.GOMAXPROCS(0) > 1: t.Skip("skipping; GOMAXPROCS>1") case race.Enabled: t.Skip("skipping malloc count under race detector") } for _, mt := range mallocTest { mallocs := testing.AllocsPerRun(100, mt.fn) if got, max := mallocs, float64(mt.count); got > max {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:31:55 UTC 2024 - 58.6K bytes - Viewed (0) -
CHANGELOG/CHANGELOG-1.3.md
## Changelog since v1.3.7 ### Other notable changes * AWS: fix volume device assignment race condition ([#31090](https://github.com/kubernetes/kubernetes/pull/31090), [@justinsb](https://github.com/justinsb)) # v1.3.7
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu Dec 24 02:28:26 UTC 2020 - 84K bytes - Viewed (0) -
src/net/http/httputil/reverseproxy_test.go
return nil }, }) req, _ := http.NewRequestWithContext(ctx, "GET", "http://go.dev/", nil) proxyHandler.ServeHTTP(rw, req) }() // Trigger data race while iterating over response headers. // When run with -race, this causes the condition in https://go.dev/issue/65123 often // enough to detect reliably. for _ = range rw.Header() { } } func Test1xxResponses(t *testing.T) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 54.6K bytes - Viewed (0) -
staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go
} if lastGraceful > 0 { return nil, false, false, out, lastExisting } // If we are here, the registry supports grace period mechanism and // we are intentionally deleting gracelessly. In this case, we may // enter a race with other k8s components. If other component wins // the race, the object will not be found, and we should tolerate // the NotFound error. See
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Fri Jan 19 23:22:44 UTC 2024 - 60.8K bytes - Viewed (0) -
android/guava/src/com/google/common/util/concurrent/AbstractFuture.java
ATOMIC_HELPER.putNext(this, next); } void unpark() { // This is racy with removeWaiter. The consequence of the race is that we may spuriously call // unpark even though the thread has already removed itself from the list. But even if we did // use a CAS, that race would still exist (it would just be ever so slightly smaller). Thread w = thread; if (w != null) {
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Fri Jun 07 22:25:23 UTC 2024 - 63.1K bytes - Viewed (0) -
pkg/kubelet/volumemanager/reconciler/reconciler_test.go
pv := getTestPV(tc.pvName, tc.volumeMode, tc.oldPVSize) pvc := getTestPVC("pv", tc.volumeMode, tc.pvcSize, tc.pvcStatusSize) pod := getTestPod(pvc.Name) // deep copy before reconciler runs to avoid data race. pvWithSize := pv.DeepCopy() node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: string(nodeName), }, Spec: v1.NodeSpec{}, Status: v1.NodeStatus{
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Apr 09 07:34:33 UTC 2024 - 75.4K bytes - Viewed (0) -
guava/src/com/google/common/util/concurrent/AbstractFuture.java
ATOMIC_HELPER.putNext(this, next); } void unpark() { // This is racy with removeWaiter. The consequence of the race is that we may spuriously call // unpark even though the thread has already removed itself from the list. But even if we did // use a CAS, that race would still exist (it would just be ever so slightly smaller). Thread w = thread; if (w != null) {
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Fri Jun 07 22:25:23 UTC 2024 - 62.8K bytes - Viewed (1) -
src/runtime/mgcmark.go
if enteredMarkAssistForTracing { trace := traceAcquire() if trace.ok() { trace.GCMarkAssistDone() // Set this *after* we trace the end to make sure // that we emit an in-progress event if this is // the first event for the goroutine in the trace // or trace generation. Also, do this between // acquire/release because this is part of the // goroutine's trace state, and it must be atomic
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 18 21:25:11 UTC 2024 - 52.5K bytes - Viewed (0) -
src/runtime/mgcpacer.go
c.lastStackScan.Store(uint64(c.stackScanWork.Load())) c.triggered = ^uint64(0) // Reset triggered. // heapLive was updated, so emit a trace event. trace := traceAcquire() if trace.ok() { trace.HeapAlloc(bytesMarked) traceRelease(trace) } } // markWorkerStop must be called whenever a mark worker stops executing. // // It updates mark work accounting in the controller by a duration of
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 55.4K bytes - Viewed (0) -
src/cmd/go/go_test.go
canRace = testenv.HasCGO() && platform.RaceDetectorSupported(runtime.GOOS, runtime.GOARCH) // The race detector doesn't work on Alpine Linux: // golang.org/issue/14481 // gccgo does not support the race detector. if isAlpineLinux() || runtime.Compiler == "gccgo" { canRace = false } } if n, limited := base.NetLimit(); limited && n > 0 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 81.1K bytes - Viewed (0)