- Sort by: Score
- Results per page: 10
- Languages: All
Results 71–80 of 111 for "race" (0.06 sec)
-
src/cmd/link/link_test.go
} func main() { callee(9) } ` func TestIssue42396(t *testing.T) { testenv.MustHaveGoBuild(t) if !platform.RaceDetectorSupported(runtime.GOOS, runtime.GOARCH) { t.Skip("no race detector support") } t.Parallel() tmpdir := t.TempDir() src := filepath.Join(tmpdir, "main.go") err := os.WriteFile(src, []byte(testIssue42396src), 0666) if err != nil {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 24 20:26:02 UTC 2024 - 43.5K bytes - Viewed (0) -
Makefile.core.mk
# For now, keep a minimal subset. This can be expanded in the future. BENCH_TARGETS ?= ./pilot/... PKG ?= ./... .PHONY: racetest racetest: $(JUNIT_REPORT) go test ${GOBUILDFLAGS} ${T} -race $(PKG) 2>&1 | tee >($(JUNIT_REPORT) > $(JUNIT_OUT)) .PHONY: benchtest benchtest: $(JUNIT_REPORT) ## Runs all benchmarks prow/benchtest.sh run $(BENCH_TARGETS) prow/benchtest.sh compare report-benchtest:
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Sun Jun 02 19:53:04 UTC 2024 - 23.2K bytes - Viewed (0) -
src/syscall/fs_wasip1.go
// determine ahead of time if the path we are about to open is a // directory, so instead we fallback to a second call to path_open with // a more limited set of rights. // // This approach is subject to a race if the file system is modified // concurrently, so we also inject OFLAG_DIRECTORY to ensure that we do // not accidentally open a file which is not a directory. errno = path_open( dirFd, LOOKUP_SYMLINK_FOLLOW,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 19:57:43 UTC 2024 - 24.1K bytes - Viewed (0) -
pilot/pkg/xds/delta.go
peerAddr = peerInfo.Addr.String() } if err := s.WaitForRequestLimit(stream.Context()); err != nil { deltaLog.Warnf("ADS: %q exceeded rate limit: %v", peerAddr, err) return status.Errorf(codes.ResourceExhausted, "request rate limit exceeded: %v", err) } ids, err := s.authenticate(ctx) if err != nil { return status.Error(codes.Unauthenticated, err.Error()) } if ids != nil {
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Fri Jun 14 04:34:37 UTC 2024 - 25.6K bytes - Viewed (0) -
pilot/pkg/config/kube/gateway/deploymentcontroller.go
return cur, false, false } if curNum == ControllerVersion { // We already manage this at this version // We will manage it, but no need to attempt to apply the version annotation, which could race with newer versions return cur, false, true } // We are either newer or the same version of the last owner - we can take over. We need to actually // re-apply the annotation return cur, true, true }
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Mon May 13 21:43:20 UTC 2024 - 26.3K bytes - Viewed (0) -
src/cmd/compile/internal/types2/named.go
func (n *Named) expandUnderlying() Type { check := n.check if check != nil && check.conf.Trace { check.trace(n.obj.pos, "-- Named.expandUnderlying %s", n) check.indent++ defer func() { check.indent-- check.trace(n.obj.pos, "=> %s (tparams = %s, under = %s)", n, n.tparams.list(), n.underlying) }() } assert(n.inst.orig.underlying != nil)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 20:03:31 UTC 2024 - 23.9K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.cc
// separate stream from the main compute stream. We must ensure the // compute stream is synchronized with the host->device transfer // stream now otherwise we will create a race condition. auto* gpu_device_context = static_cast<GPUDeviceContext*>(ctx->op_device_context()); TF_RETURN_IF_ERROR(gpu_device_context->stream()->WaitFor(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes - Viewed (0) -
src/encoding/gob/type.go
if ui, ok := userTypeCache.Load(rt); ok { return ui.(*userTypeInfo), nil } // Construct a new userTypeInfo and atomically add it to the userTypeCache. // If we lose the race, we'll waste a little CPU and create a little garbage // but return the existing value anyway. ut := new(userTypeInfo) ut.base = rt ut.user = rt // A type that is just a cycle of pointers (such as type T *T) cannot
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 16 02:00:26 UTC 2024 - 27.2K bytes - Viewed (0) -
cmd/kubeadm/app/phases/upgrade/staticpods.go
fmt.Printf("[upgrade/staticpods] This can take up to %v\n", kubeadmapi.GetActiveTimeouts().UpgradeManifests.Duration) // Wait for the mirror Pod hash to change; otherwise we'll run into race conditions here when the kubelet hasn't had time to // notice the removal of the Static Pod, leading to a false positive below where we check that the API endpoint is healthy
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed May 29 10:07:41 UTC 2024 - 30.7K bytes - Viewed (0) -
src/go/types/named.go
func (n *Named) expandUnderlying() Type { check := n.check if check != nil && check.conf._Trace { check.trace(n.obj.pos, "-- Named.expandUnderlying %s", n) check.indent++ defer func() { check.indent-- check.trace(n.obj.pos, "=> %s (tparams = %s, under = %s)", n, n.tparams.list(), n.underlying) }() } assert(n.inst.orig.underlying != nil)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 20:03:31 UTC 2024 - 24K bytes - Viewed (0)