- Sort Score
- Result 10 results
- Languages All
Results 51 - 60 of 332 for race (0.09 sec)
-
src/internal/trace/trace_test.go
{trace.EventTaskBegin, trace.TaskID(1), []string{"task0"}}, {trace.EventRegionBegin, trace.TaskID(1), []string{"region0"}}, {trace.EventRegionBegin, trace.TaskID(1), []string{"region1"}}, {trace.EventLog, trace.TaskID(1), []string{"key0", "0123456789abcdef"}}, {trace.EventRegionEnd, trace.TaskID(1), []string{"region1"}}, {trace.EventRegionEnd, trace.TaskID(1), []string{"region0"}}, {trace.EventTaskEnd, trace.TaskID(1), []string{"task0"}},
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 18.5K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/telemetry/internal/upload/reports.go
if cfg.HasCounter(p.Program, k) && report.X <= cfg.Rate(p.Program, k) { x.Counters[k] = v } } // and the same for Stacks // this can be made more efficient, when it matters for k, v := range p.Stacks { before, _, _ := strings.Cut(k, "\n") if cfg.HasStack(p.Program, before) && report.X <= cfg.Rate(p.Program, before) { x.Stacks[k] = v } } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 04 14:52:56 UTC 2024 - 10.3K bytes - Viewed (0) -
src/runtime/extern.go
the limit. The GORACE variable configures the race detector, for programs built using -race. See the [Race Detector article] for details. The GOTRACEBACK variable controls the amount of output generated when a Go program fails due to an unrecovered panic or an unexpected runtime condition. By default, a failure prints a stack trace for the current goroutine,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:52:17 UTC 2024 - 18.9K bytes - Viewed (0) -
src/runtime/string.go
} else { b = rawbyteslice(len(s)) } copy(b, s) return b } func stringtoslicerune(buf *[tmpStringBufSize]rune, s string) []rune { // two passes. // unlike slicerunetostring, no race because strings are immutable. n := 0 for range s { n++ } var a []rune if buf != nil && n <= len(buf) { *buf = [tmpStringBufSize]rune{} a = buf[:n] } else { a = rawruneslice(n)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:17:26 UTC 2024 - 13.4K bytes - Viewed (0) -
src/os/exec/exec_test.go
// This test is run by cmd/dist under the race detector to verify that // the race detector no longer reports any problems. func TestStdinCloseRace(t *testing.T) { t.Parallel() cmd := helperCommand(t, "stdinClose") stdin, err := cmd.StdinPipe() if err != nil { t.Fatalf("StdinPipe: %v", err) } if err := cmd.Start(); err != nil { t.Fatalf("Start: %v", err) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 20:13:53 UTC 2024 - 48.4K bytes - Viewed (0) -
src/os/pidfd_linux.go
} return h, nil } // _P_PIDFD is used as idtype argument to waitid syscall. const _P_PIDFD = 3 func (p *Process) pidfdWait() (*ProcessState, error) { // When pidfd is used, there is no wait/kill race (described in CL 23967) // because the PID recycle issue doesn't exist (IOW, pidfd, unlike PID, // is guaranteed to refer to one particular process). Thus, there is no
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 11 18:08:44 UTC 2024 - 4.2K bytes - Viewed (0) -
src/runtime/chan.go
// this way, Go will continue to not allocate buffer entries for channels // of elemsize==0, yet the race detector can be made to handle multiple // sync objects underneath the hood (one sync object per idx) qp := chanbuf(c, idx) // When elemsize==0, we don't allocate a full buffer for the channel. // Instead of individual buffer entries, the race detector uses the // c.buf as the only buffer entry. This simplification prevents us from
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:16:50 UTC 2024 - 25.9K bytes - Viewed (0) -
src/sync/cond.go
// 2. Ensure c is initialized. If the CAS succeeds, we're done. If it fails, c was either initialized concurrently and we simply lost the race, or c has been copied. // 3. Do step 1 again. Now that c is definitely initialized, if this fails, c was copied. if uintptr(*c) != uintptr(unsafe.Pointer(c)) &&
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 21:14:51 UTC 2024 - 4.1K bytes - Viewed (0) -
src/io/pipe_test.go
count = 8 readSize = 2 ) t.Run("Write", func(t *testing.T) { r, w := Pipe() for i := 0; i < count; i++ { go func() { time.Sleep(time.Millisecond) // Increase probability of race if n, err := w.Write([]byte(input)); n != len(input) || err != nil { t.Errorf("Write() = (%d, %v); want (%d, nil)", n, err, len(input)) } }() } buf := make([]byte, count*len(input))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 9K bytes - Viewed (0) -
src/slices/slices_test.go
} if n := testing.AllocsPerRun(100, func() { _ = Grow(s2, cap(s2)-len(s2)+1) }); n != 1 { errorf := t.Errorf if race.Enabled || testenv.OptimizationOff() { errorf = t.Logf // this allocates multiple times in race detector mode } errorf("Grow should allocate once when given insufficient capacity; allocated %v times", n) } // Test for negative growth sizes.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 26 13:32:06 UTC 2024 - 33.2K bytes - Viewed (0)