Results 31 - 40 of 42 for newSource (0.14 sec)
- cmd/erasure-sets.go
// the set topology, this monitoring happens at a given monitoring interval.
func (s *erasureSets) monitorAndConnectEndpoints(ctx context.Context, monitorInterval time.Duration) {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	time.Sleep(time.Duration(r.Float64() * float64(time.Second)))

	// Pre-emptively connect the disks if possible.
	s.connectDisks(false)

	monitor := time.NewTimer(monitorInterval)
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Sep 27 10:41:37 UTC 2024 - 37K bytes - Viewed (1)
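Several of the MinIO hits on this page share one idiom: seed a private *rand.Rand from the clock, sleep a random sub-second jitter so identically configured nodes do not act in lockstep, then fall into a timer-driven loop. A minimal sketch of that shape (monitorLoop and doWork are illustrative names, not taken from the source):

package example

import (
	"context"
	"math/rand"
	"time"
)

// monitorLoop sleeps a random fraction of a second as startup jitter,
// then invokes doWork every interval until the context is cancelled.
func monitorLoop(ctx context.Context, interval time.Duration, doWork func()) {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	time.Sleep(time.Duration(r.Float64() * float64(time.Second))) // startup jitter

	t := time.NewTimer(interval)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			doWork()
			t.Reset(interval)
		}
	}
}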
- cmd/erasure-server-pool-decom.go
go func() {
	// Resume decommissioning of pools, but wait 3 minutes for cluster to stabilize.
	if err := sleepContext(ctx, 3*time.Minute); err != nil {
		return
	}
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	for {
		if err := z.Decommission(ctx, poolIndices...); err != nil {
			if errors.Is(err, errDecommissionAlreadyRunning) {
				// A previous decommission running found restart it.
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Sep 06 13:20:19 UTC 2024 - 42.2K bytes - Viewed (0)
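sleepContext here is a MinIO helper rather than a standard-library function; the idea is a sleep that returns early when the context is cancelled. A rough sketch of that behavior, assuming only the context and time packages:

// sleepContext waits for d to elapse, or returns the context's error if it
// is cancelled first. Illustrative only; not the MinIO implementation.
func sleepContext(ctx context.Context, d time.Duration) error {
	t := time.NewTimer(d)
	defer t.Stop()
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-t.C:
		return nil
	}
}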
- cmd/notification.go
//
// The first call to return a non-nil error will be
// collected in errs slice and returned by Wait().
func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, addr xnet.Host) {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	g.workers.Take()
	go func() {
		defer g.workers.Give()
		g.errs[index] = NotificationPeerErr{
			Host: addr,
		}
		retryCount := g.retryCount
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Mon Sep 09 16:58:30 UTC 2024 - 46.2K bytes - Viewed (0)
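g.workers.Take() and g.workers.Give() behave like a counting semaphore that caps how many notification goroutines run at once. The MinIO type has its own implementation; a buffered channel gives the same effect in a few lines, shown here only as an illustration:

// semaphore caps concurrency at the channel's capacity.
type semaphore chan struct{}

func newSemaphore(n int) semaphore { return make(semaphore, n) }

func (s semaphore) Take() { s <- struct{}{} } // blocks once all slots are taken
func (s semaphore) Give() { <-s }             // frees a slot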
- cmd/data-scanner.go
)

// initDataScanner will start the scanner in the background.
func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
	go func() {
		r := rand.New(rand.NewSource(time.Now().UnixNano()))
		// Run the data scanner in a loop
		for {
			runDataScanner(ctx, objAPI)
			duration := time.Duration(r.Float64() * float64(scannerCycle.Load()))
			if duration < time.Second {
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Tue Oct 22 21:10:34 UTC 2024 - 48.4K bytes - Viewed (0)
- internal/grid/connection.go
	if err != nil {
		return err
	}
	return wsutil.WriteMessage(conn, c.side, ws.OpBinary, dst)
}

func (c *Connection) connect() {
	c.updateState(StateConnecting)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	// Runs until the server is shut down.
	for {
		if c.State() == StateShutdown {
			return
		}
		dialStarted := time.Now()
		if debugPrint {
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Mon Jul 29 18:10:04 UTC 2024 - 46.7K bytes - Viewed (0)
- src/bufio/bufio_test.go
					t.Errorf("have=%q", written)
				}
			}
		}
	}
}

func TestWriterAppend(t *testing.T) {
	got := new(bytes.Buffer)
	var want []byte
	rn := rand.New(rand.NewSource(0))
	w := NewWriterSize(got, 64)
	for i := 0; i < 100; i++ {
		// Obtain a buffer to append to.
		b := w.AvailableBuffer()
		if w.Available() != cap(b) {
Registered: Tue Nov 05 11:13:11 UTC 2024 - Last Modified: Fri Nov 01 21:52:12 UTC 2024 - 51.6K bytes - Viewed (0)
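This Go standard-library test exercises (*bufio.Writer).AvailableBuffer (added in Go 1.18), which returns an empty slice over the writer's unused capacity so callers can append into it and pass the result straight back to Write without allocating a scratch buffer. A small standalone usage example in the same spirit:

package main

import (
	"bufio"
	"os"
	"strconv"
)

func main() {
	w := bufio.NewWriter(os.Stdout)
	for i := 0; i < 4; i++ {
		b := w.AvailableBuffer()               // empty slice backed by the writer's free space
		b = strconv.AppendInt(b, int64(i), 10) // append directly into that space
		b = append(b, ' ')
		w.Write(b)
	}
	w.Flush() // prints "0 1 2 3 "
}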
- cmd/test-utils_test.go
		req.Header.Set(k, v)
	}

	err = signRequestV4(req, accessKey, secretKey)
	if err != nil {
		return nil, err
	}
	return req, nil
}

var src = rand.NewSource(time.Now().UnixNano())

func randString(n int) string {
	b := make([]byte, n)
	// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Tue Oct 01 22:13:18 UTC 2024 - 77K bytes - Viewed (0)
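The comment about 63 random bits refers to the common trick of carving one src.Int63() value into several 6-bit letter indices instead of drawing from the source once per character. The constants below are the usual choices for a 52-letter alphabet; the exact values in the MinIO file are not visible in this snippet, so treat this as a sketch (math/rand and time imports assumed):

const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

const (
	letterIdxBits = 6                    // 6 bits index up to 64 letters
	letterIdxMask = 1<<letterIdxBits - 1 // mask for the low 6 bits
	letterIdxMax  = 63 / letterIdxBits   // letters carved from one Int63()
)

var src = rand.NewSource(time.Now().UnixNano())

// randString builds an n-character string, reusing the 63 random bits from
// each src.Int63() call for up to letterIdxMax characters.
func randString(n int) string {
	b := make([]byte, n)
	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
		if remain == 0 {
			cache, remain = src.Int63(), letterIdxMax
		}
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			b[i] = letterBytes[idx]
			i--
		}
		cache >>= letterIdxBits
		remain--
	}
	return string(b)
}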
- src/bytes/bytes_test.go
			}
		}
	}

	// Shuffle the runes so that they are not in descending order.
	// The sort is deterministic since this is used for benchmarks,
	// which need to be repeatable.
	rr := rand.New(rand.NewSource(1))
	rr.Shuffle(len(rs), func(i, j int) { rs[i], rs[j] = rs[j], rs[i] })
	uchars := string(rs)
	return func(b *testing.B, n int) {
		buf := bmbuf[0:n]
		o := copy(buf, uchars)
Registered: Tue Nov 05 11:13:11 UTC 2024 - Last Modified: Mon Aug 19 19:09:04 UTC 2024 - 61.2K bytes - Viewed (0)
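Here the seed is the constant 1 rather than the clock: the shuffle must yield the same permutation on every run so that benchmark inputs stay comparable. The same idea in isolation:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	data := []int{1, 2, 3, 4, 5}
	rr := rand.New(rand.NewSource(1)) // fixed seed, so the permutation is repeatable
	rr.Shuffle(len(data), func(i, j int) { data[i], data[j] = data[j], data[i] })
	fmt.Println(data) // identical output on every run
}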
- cmd/erasure-server-pool.go
	// so various tasks will be useful
	bootstrapTrace("initAutoHeal", func() {
		initAutoHeal(GlobalContext, z)
	})

	// initialize the object layer.
	defer setObjectLayer(z)

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	attempt := 1
	for {
		var err error
		bootstrapTrace(fmt.Sprintf("poolMeta.Init: loading pool metadata, attempt: %d", attempt), func() {
			err = z.Init(ctx) // Initializes all pools.
		})
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Sun Sep 29 22:40:36 UTC 2024 - 89.8K bytes - Viewed (0)
- cmd/bucket-replication.go
	go p.startResyncRoutine(ctx, buckets, objAPI)
	return nil
}

func (p *ReplicationPool) startResyncRoutine(ctx context.Context, buckets []string, objAPI ObjectLayer) {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	// Run the replication resync in a loop
	for {
		if err := p.loadResync(ctx, buckets, objAPI); err == nil {
			<-ctx.Done()
			return
		}
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Thu Oct 10 06:49:55 UTC 2024 - 116.1K bytes - Viewed (0)