Results 1 - 10 of 50 for goroutine (0.95 sec)
doc/go_mem.html
Some, such as atomic compare-and-swap, are both read-like and write-like. A goroutine execution is modeled as a set of memory operations executed by a single goroutine. Requirement 1: The memory operations in each goroutine must correspond to a correct sequential execution of that goroutine, given the values read from and written to memory.
Registered: Tue Sep 09 11:13:09 UTC 2025 - Last Modified: Tue Aug 05 15:41:37 UTC 2025 - 26.6K bytes - Viewed (0)
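To make the read-like/write-like vocabulary above concrete, here is a minimal sketch (not taken from go_mem.html; the variable names are illustrative) showing a write in one goroutine ordered before a read in another by a channel operation:

package main

import "fmt"

func main() {
	var msg string
	done := make(chan struct{})

	go func() {
		// Write-like memory operation performed by this goroutine.
		msg = "hello"
		// Closing the channel synchronizes with the receive below,
		// so the write to msg is visible to main.
		close(done)
	}()

	<-done           // receive orders this goroutine after the close
	fmt.Println(msg) // prints "hello"
}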
cmd/leak-detect_test.go
		for _, g := range leaked {
			t.Errorf("Leaked goroutine: %v", g)
		}
		return
	}
}

// DetectTestLeak - snapshots the currently running goroutines and returns a
// function to be run at the end of tests to see whether any
// goroutines leaked.
// Usage: `defer DetectTestLeak(t)()` in beginning line of benchmarks or unit tests.
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 5.2K bytes - Viewed (0)
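Based on the usage note in the snippet above, a test installs the leak detector on its first line; a minimal sketch, assuming the test lives in the same package as DetectTestLeak (the test body and channel are hypothetical):

func TestSomething(t *testing.T) {
	// Snapshot the goroutines running now; the returned function runs when
	// the test ends and reports any goroutines started during the test
	// that never exited.
	defer DetectTestLeak(t)()

	done := make(chan struct{})
	go func() {
		defer close(done)
		// ... work under test ...
	}()
	<-done // the goroutine has exited, so no leak is reported
}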
cmd/erasure.go
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 16.1K bytes - Viewed (0)
cmd/admin-handlers_test.go
	}
	result := &serviceResult{}
	if err := json.Unmarshal(resp, result); err != nil {
		t.Error(err)
	}
	_ = result
	// Wait until testServiceSignalReceiver() called in a goroutine quits.
	wg.Wait()
}

// Test for service restart management REST API.
func TestServiceRestartHandler(t *testing.T) {
	testServicesCmdHandler(restartCmd, t)
}
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 13.9K bytes - Viewed (0)
cmd/namespace-lock_test.go
// lk2
lk2ch := make(chan struct{})
go func() {
	defer close(lk2ch)
	nsLk.lock(ctx, "volume", "path", "source", "opsID", false, 1*time.Millisecond)
}()
time.Sleep(1 * time.Millisecond) // wait for goroutine to advance; ref=2

// Unlock the 1st lock; ref=1 after this line
nsLk.unlock("volume", "path", false)

// Taking another lockMapMutex here allows queuing up additional lockers. This should
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 3K bytes - Viewed (0)
cmd/utils.go
		return buf.Bytes(), err
	}
case madmin.ProfilerGoroutines:
	prof.ext = "txt"
	prof.record("goroutine", 1, "before")
	prof.record("goroutine", 2, "before,debug=2")
	prof.stopFn = func() ([]byte, error) {
		var buf bytes.Buffer
		err := pprof.Lookup("goroutine").WriteTo(&buf, 1)
		return buf.Bytes(), err
	}
case madmin.ProfilerTrace:
	dirPath, err := os.MkdirTemp("", "profile")
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 33K bytes - Viewed (0)
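The stop function above uses the standard runtime/pprof "goroutine" profile. A self-contained sketch of the same call, with the output destination and debug level chosen here only for illustration:

package main

import (
	"os"
	"runtime/pprof"
)

func main() {
	// Start a few goroutines so the profile has something to show.
	for i := 0; i < 3; i++ {
		go func() { select {} }()
	}

	// debug=1 prints each unique stack once with a count;
	// debug=2 dumps every goroutine's full stack trace.
	pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
}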
cmd/data-scanner_test.go
	LockEnabled: true,
}
expiryWorker := func(wg *sync.WaitGroup, readyCh chan<- struct{}, taskCh <-chan expiryOp, gotExpired *[]ObjectToDelete) {
	defer wg.Done()
	// signal the calling goroutine that the worker is ready to receive tasks
	close(readyCh)
	var expired []ObjectToDelete
	for t := range taskCh {
		switch v := t.(type) {
		case noncurrentVersionsTask:
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 12K bytes - Viewed (0)
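The worker above follows a common pattern: close a "ready" channel to signal the spawner, then drain a task channel until it is closed. A generic, self-contained sketch of that pattern (the names and task type are illustrative, not MinIO's):

package main

import (
	"fmt"
	"sync"
)

func worker(wg *sync.WaitGroup, ready chan<- struct{}, tasks <-chan int) {
	defer wg.Done()
	close(ready) // tell the spawner we are now listening on tasks
	for t := range tasks {
		fmt.Println("processed task", t)
	}
}

func main() {
	var wg sync.WaitGroup
	ready := make(chan struct{})
	tasks := make(chan int)

	wg.Add(1)
	go worker(&wg, ready, tasks)

	<-ready // wait until the worker is ready before sending
	for i := 1; i <= 3; i++ {
		tasks <- i
	}
	close(tasks) // ends the worker's range loop
	wg.Wait()
}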
cmd/notification.go
// returns the slice of errors from all function calls.
func (g *NotificationGroup) Wait() []NotificationPeerErr {
	g.workers.Wait()
	return g.errs
}

// Go calls the given function in a new goroutine.
//
// The first call to return a non-nil error will be
// collected in errs slice and returned by Wait().
func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, addr xnet.Host) {
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 45.9K bytes - Viewed (0)
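The Go/Wait pair documented above is a fan-out pattern: launch one goroutine per peer, record each call's error at its index, and collect them all in Wait. NotificationGroup's constructor and error type are not shown in the snippet, so here is a generic sketch of the same idea using only the standard library (peer names and callPeer are hypothetical):

package main

import (
	"fmt"
	"sync"
)

// callPeer is a stand-in for the per-peer work.
func callPeer(addr string) error {
	if addr == "peer-2" {
		return fmt.Errorf("%s unreachable", addr)
	}
	return nil
}

func main() {
	addrs := []string{"peer-1", "peer-2", "peer-3"}

	var wg sync.WaitGroup
	errs := make([]error, len(addrs)) // one slot per call, filled by index

	for i, addr := range addrs {
		wg.Add(1)
		go func(i int, addr string) {
			defer wg.Done()
			// Each goroutine writes only its own slot, so no extra locking is needed.
			errs[i] = callPeer(addr)
		}(i, addr)
	}
	wg.Wait() // the analogue of NotificationGroup.Wait()

	for i, err := range errs {
		fmt.Printf("%s: %v\n", addrs[i], err)
	}
}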
cmd/erasure-server-pool-rebalance.go
if err != nil {
	rebalanceLogIf(ctx, fmt.Errorf("invalid workers value err: %v, defaulting to %d", err, len(pool.sets)))
	workerSize = len(pool.sets)
}
// Each decom worker needs one List() goroutine/worker
// add that many extra workers.
workerSize += len(pool.sets)
wk, err := workers.New(workerSize)
if err != nil {
	return err
}
for setIdx, set := range pool.sets {
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Thu Sep 04 20:47:24 UTC 2025 - 28.9K bytes - Viewed (0)
cmd/admin-heal-ops.go
	h.mutex.Unlock()
	return nil
}

// healSequenceStart - this is the top-level background heal
// routine. It launches another go-routine that actually traverses
// on-disk data, checks and heals according to the selected
// settings. This go-routine itself, (1) monitors the traversal
// routine for completion, and (2) listens for external stop
// signals. When either event happens, it sets the finish status for
Registered: Sun Sep 07 19:28:11 UTC 2025 - Last Modified: Fri Aug 29 02:39:48 UTC 2025 - 25.4K bytes - Viewed (0)
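The monitor-and-stop structure described in that comment is a select-based supervision pattern: one goroutine does the traversal and signals completion, while the supervisor waits for either that signal or an external stop. A minimal generic sketch (the channel and message names are illustrative, not the actual healSequence fields):

package main

import "fmt"

func main() {
	traversalDone := make(chan struct{})
	stop := make(chan struct{})

	// Worker: the routine that would traverse on-disk data and heal.
	go func() {
		defer close(traversalDone)
		// ... traverse, check and heal ...
	}()

	// Supervisor: finishes when the traversal completes or a stop
	// signal arrives, whichever happens first.
	select {
	case <-traversalDone:
		fmt.Println("traversal finished; setting finish status")
	case <-stop:
		fmt.Println("external stop received; setting finish status")
	}
}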