Results 1 - 10 of 37 for goroutine (0.11 sec)
docs/debugging/pprofgoparser/main.go
)

func init() {
	flag.DurationVar(&less, "less", 0, "goroutine waiting less than the specified time")
	flag.DurationVar(&goTime, "time", 0, "goroutine waiting for exactly the specified time")
	flag.DurationVar(&margin, "margin", 0, "margin time")
	flag.StringVar(&searchText, "search", "", "Regex to search for a text in one goroutine stacktrace")
}
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Wed Mar 06 11:43:16 UTC 2024 - 3.4K bytes
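For reference, the flag.DurationVar / flag.StringVar binding pattern above works standalone; a minimal hypothetical sketch (variable and flag names mirror the snippet, the program itself is illustrative, not MinIO's tool):

package main

import (
	"flag"
	"fmt"
	"time"
)

func main() {
	var less time.Duration
	var search string
	// Bind the flags as in the snippet above, then parse the command line.
	flag.DurationVar(&less, "less", 0, "goroutine waiting less than the specified time")
	flag.StringVar(&search, "search", "", "regex to search for in a goroutine stacktrace")
	flag.Parse()
	fmt.Printf("less=%v search=%q\n", less, search)
}

Duration flags parse values like 300ms, 5m, or 1h30m, so a run such as `go run main.go -less 5m -search mux` needs no extra conversion code.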
cmd/leak-detect_test.go
		for _, g := range leaked {
			t.Errorf("Leaked goroutine: %v", g)
		}
		return
	}
}

// DetectTestLeak - snapshots the currently running goroutines and returns a
// function to be run at the end of tests to see whether any goroutines leaked.
// Usage: `defer DetectTestLeak(t)()` at the beginning of benchmarks or unit tests.
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Thu Jan 18 07:03:17 UTC 2024 - 5.2K bytes
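The documented usage translates directly into a test. A hedged sketch, assuming the DetectTestLeak helper from the file above and the standard testing package are in scope (the test body is illustrative):

func TestNoLeak(t *testing.T) {
	defer DetectTestLeak(t)() // snapshot goroutines now; compare when the test returns
	done := make(chan struct{})
	go func() {
		close(done) // this goroutine exits before the test ends, so nothing leaks
	}()
	<-done
}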
src/archive/zip/register.go
// The WriteCloser's Close method must be used to flush pending data to w.
// The Compressor itself must be safe to invoke from multiple goroutines
// simultaneously, but each returned writer will be used only by
// one goroutine at a time.
type Compressor func(w io.Writer) (io.WriteCloser, error)

// A Decompressor returns a new decompressing reader, reading from r.
Registered: Tue Nov 05 11:13:11 UTC 2024 - Last Modified: Fri Oct 13 18:36:46 UTC 2023 - 3.7K bytes
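A Compressor satisfying that contract can be plugged into the standard library. A minimal sketch registering compress/flate at maximum compression (file names are illustrative):

package main

import (
	"archive/zip"
	"compress/flate"
	"io"
	"log"
	"os"
)

func main() {
	f, err := os.Create("out.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	zw := zip.NewWriter(f)
	// The closure may be invoked from multiple goroutines, but each returned
	// flate.Writer is used by only one goroutine at a time, per the contract above.
	zw.RegisterCompressor(zip.Deflate, func(out io.Writer) (io.WriteCloser, error) {
		return flate.NewWriter(out, flate.BestCompression)
	})

	w, err := zw.Create("hello.txt")
	if err != nil {
		log.Fatal(err)
	}
	io.WriteString(w, "hello")
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
}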
manifests/addons/dashboards/pilot.libsonnet
    panels.timeSeries.base('CPU Usage', queries.cpuUsage, 'CPU usage of each running instance'),
    panels.timeSeries.base('Goroutines', queries.goroutines, 'Goroutine count for each running instance'),
  ]),
], panelHeight=10, startY=1)
+ g.util.grid.makeGrid([
  row.new('Push Information')
  + row.withPanels([
    panels.timeSeries.xdsPushes(
Registered: Wed Nov 06 22:53:10 UTC 2024 - Last Modified: Wed Jun 12 20:46:28 UTC 2024 - 2.9K bytes
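The Goroutines panel charts each instance's live goroutine count, which Go services typically export through a metrics endpoint. As a stdlib-only sketch of publishing that same number (the variable name and port are assumptions, not Istio's actual setup, which uses Prometheus metrics):

package main

import (
	"expvar"
	"log"
	"net/http"
	"runtime"
)

func main() {
	// Publish the live goroutine count at /debug/vars; a dashboard can poll it.
	expvar.Publish("goroutines", expvar.Func(func() any {
		return runtime.NumGoroutine()
	}))
	log.Fatal(http.ListenAndServe(":8080", nil)) // expvar registers /debug/vars on the default mux
}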
internal/s3select/json/reader.go
	return dstRec, nil
}

// Close - closes underlying reader.
func (r *Reader) Close() error {
	// Close the input.
	err := r.readCloser.Close()
	for range r.valueCh {
		// Drain values so we don't leak a goroutine.
		// Since we have closed the input, it should fail rather quickly.
	}
	return err
}

// NewReader - creates new JSON reader using readCloser.
func NewReader(readCloser io.ReadCloser, args *ReaderArgs) *Reader {
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Mon Sep 23 19:35:41 UTC 2024 - 3.2K bytes
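The drain loop terminates only because the producing goroutine closes valueCh once its input is gone. A self-contained sketch of that drain-on-close pattern, with all names hypothetical:

package main

import "fmt"

type producer struct {
	valueCh chan int
	stop    chan struct{}
}

func newProducer() *producer {
	p := &producer{valueCh: make(chan int), stop: make(chan struct{})}
	go func() {
		defer close(p.valueCh) // lets Close's drain loop terminate
		for i := 0; ; i++ {
			select {
			case p.valueCh <- i:
			case <-p.stop:
				return
			}
		}
	}()
	return p
}

// Close stops the producer, then drains valueCh so the goroutine can never
// stay blocked on a send: the same drain-on-close idea as Reader.Close above.
func (p *producer) Close() {
	close(p.stop)
	for range p.valueCh {
		// Discard pending values; the range ends once the goroutine closes valueCh.
	}
}

func main() {
	p := newProducer()
	fmt.Println(<-p.valueCh) // 0
	p.Close()
}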
internal/http/listener.go
	acceptCh    chan acceptResult // channel where all TCP listeners write accepted connection.
	ctx         context.Context
	ctxCanceler context.CancelFunc
}

// start - starts separate goroutine for each TCP listener. A valid new
// connection is passed to httpListener.acceptCh.
func (listener *httpListener) start() {
	// Closure to send acceptResult to acceptCh.
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Tue Jul 23 10:53:03 UTC 2024 - 5.6K bytes
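The described pattern, one accept goroutine per listener funneling into a shared channel, can be sketched generically (function and channel names are illustrative, not MinIO's):

package main

import (
	"context"
	"net"
)

// fanIn starts one accept goroutine per listener and funnels every accepted
// connection into a single channel, mirroring the start method described above.
func fanIn(ctx context.Context, listeners []net.Listener) <-chan net.Conn {
	acceptCh := make(chan net.Conn)
	for _, l := range listeners {
		go func(l net.Listener) {
			for {
				conn, err := l.Accept()
				if err != nil {
					return // listener closed or a fatal accept error
				}
				select {
				case acceptCh <- conn:
				case <-ctx.Done():
					conn.Close()
					return
				}
			}
		}(l)
	}
	return acceptCh
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	conns := fanIn(ctx, []net.Listener{l})
	_ = conns // a real server would range over conns and serve each connection
}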
internal/grid/muxserver.go
		m.outBlock <- struct{}{}
	}

	// Handler goroutine.
	var handlerErr atomic.Pointer[RemoteErr]
	go func() {
		wg.Wait()
		defer xioutil.SafeClose(send)
		err := m.handleRequests(ctx, msg, send, handler, handlerIn)
		if err != nil {
			handlerErr.Store(err)
		}
	}()

	// Response sender goroutine...
	go func(outBlock <-chan struct{}) {
		wg.Wait()
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Jun 07 15:51:52 UTC 2024 - 9.7K bytes
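Storing the handler's error in an atomic.Pointer, as above, publishes it across goroutines without a mutex. A minimal runnable sketch, with RemoteErr reduced to a stand-in type:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type RemoteErr string // stand-in for the grid package's remote error type

func main() {
	var handlerErr atomic.Pointer[RemoteErr]
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		err := RemoteErr("handler failed") // pretend the handler returned an error
		handlerErr.Store(&err)             // lock-free publication to other goroutines
	}()
	wg.Wait()
	if p := handlerErr.Load(); p != nil {
		fmt.Println(*p) // handler failed
	}
}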
cmd/shared-lock.go
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		select {
		case <-ctx1.Done():
		case <-ctx2.Done():
		// The lock acquirer decides to cancel, exit this goroutine
		case <-ctx.Done():
		}
		cancel()
	}()
	return ctx, cancel
}

func (ld sharedLock) GetLock(ctx context.Context) (context.Context, context.CancelFunc) {
	l := <-ld.lockContext
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Mon Feb 13 09:26:38 UTC 2023 - 2.3K bytes
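Wrapped as a standalone helper, the same merged-cancellation pattern looks like this (the helper name is hypothetical; the select body matches the snippet):

package main

import (
	"context"
	"fmt"
)

// mergeCancel returns a context that is canceled when either parent is canceled.
func mergeCancel(ctx1, ctx2 context.Context) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		select {
		case <-ctx1.Done():
		case <-ctx2.Done():
		case <-ctx.Done(): // caller canceled; exit this goroutine
		}
		cancel()
	}()
	return ctx, cancel
}

func main() {
	p1, cancel1 := context.WithCancel(context.Background())
	p2 := context.Background()
	merged, stop := mergeCancel(p1, p2)
	defer stop()
	cancel1() // canceling either parent cancels the merged context
	<-merged.Done()
	fmt.Println(merged.Err()) // context canceled
}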
tests/prepared_stmt_test.go
// TestPreparedStmtConcurrentClose tests calling Close and executing SQL
// concurrently; for example, one goroutine hits an error and closes the
// database while others are still executing SQL. The test makes sure that
// gorm does not hit a segmentation fault, that the only errors caused are
// from using a closed Stmt or gorm.ErrInvalidDB, and that every goroutine
// gets gorm.ErrInvalidDB after the database is closed.
func TestPreparedStmtConcurrentClose(t *testing.T) {
Registered: Sun Nov 03 09:35:10 UTC 2024 - Last Modified: Thu Aug 22 11:02:05 UTC 2024 - 8.5K bytes
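Reduced to plain database/sql, the shape of such a test looks roughly like this (assumes database/sql, sync, and testing imports plus a registered sqlite3 driver; the driver name and DSN are placeholders, and gorm's real test additionally asserts which errors are acceptable):

func TestConcurrentClose(t *testing.T) {
	db, err := sql.Open("sqlite3", ":memory:") // hypothetical driver/DSN
	if err != nil {
		t.Fatal(err)
	}
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// After Close below this must fail cleanly ("sql: database is closed"),
			// never crash the process.
			_ = db.Ping()
		}()
	}
	db.Close() // races with the workers above on purpose
	wg.Wait()
}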
internal/s3select/progress.go
	closedMu sync.Mutex
	closer   io.ReadCloser
	closed   bool
}

func (pr *progressReader) Read(p []byte) (n int, err error) {
	// This ensures that Close will block until Read has completed.
	// This allows another goroutine to close the reader.
	pr.closedMu.Lock()
	defer pr.closedMu.Unlock()
	if pr.closed {
		return 0, errors.New("progressReader: read after Close")
	}
	return pr.processedReader.Read(p)
}
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Sun Sep 22 00:33:43 UTC 2024 - 4.3K bytes
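A self-contained sketch of that close-guard idea, with hypothetical names: holding the mutex across Read makes Close wait for any in-flight Read, and Read fails cleanly afterwards.

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"sync"
)

type guardedReader struct {
	mu     sync.Mutex
	r      io.Reader
	closed bool
}

func (g *guardedReader) Read(p []byte) (int, error) {
	// Holding the mutex for the whole Read makes Close block until it finishes.
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.closed {
		return 0, errors.New("guardedReader: read after Close")
	}
	return g.r.Read(p)
}

func (g *guardedReader) Close() error {
	g.mu.Lock() // waits for any in-flight Read
	defer g.mu.Unlock()
	g.closed = true
	return nil
}

func main() {
	g := &guardedReader{r: strings.NewReader("hello")}
	buf := make([]byte, 5)
	n, _ := g.Read(buf)
	fmt.Println(string(buf[:n])) // hello
	g.Close()
	_, err := g.Read(buf)
	fmt.Println(err) // guardedReader: read after Close
}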