Results 81 - 90 of 2,202 for rand (0.02 sec)
- cmd/erasure-server-pool-decom.go

      go func() {
          // Resume decommissioning of pools, but wait 3 minutes for cluster to stabilize.
          if err := sleepContext(ctx, 3*time.Minute); err != nil {
              return
          }
          r := rand.New(rand.NewSource(time.Now().UnixNano()))
          for {
              if err := z.Decommission(ctx, poolIndices...); err != nil {
                  if errors.Is(err, errDecommissionAlreadyRunning) {
                      // A previous decommission running found restart it.
  Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Sep 06 13:20:19 UTC 2024 - 42.2K bytes - Viewed (1)
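The decommission-resume snippet above shows a pattern that recurs throughout these results: a goroutine seeds its own *rand.Rand and uses it to randomize retry delays. A minimal, self-contained sketch of that idea only (not MinIO's code; sleepCtx, retryOp, and the 1-5s delay bounds are illustrative assumptions):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// sleepCtx waits for d, or returns early if ctx is cancelled.
func sleepCtx(ctx context.Context, d time.Duration) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(d):
		return nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Each retry loop seeds its own source so delays differ across nodes.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))

	// retryOp is a hypothetical stand-in for the real operation being retried.
	retryOp := func() error { return errors.New("not ready") }

	for {
		if err := retryOp(); err != nil {
			// Wait a random 1-5s before retrying so peers don't retry in lockstep.
			d := time.Second + time.Duration(r.Int63n(int64(4*time.Second)))
			if sleepCtx(ctx, d) != nil {
				fmt.Println("giving up:", ctx.Err())
				return
			}
			continue
		}
		fmt.Println("done")
		return
	}
}
```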
- docs/distributed/DESIGN.md

      ## Architecture
      Expansion of ellipses and choice of erasure sets based on this expansion is an automated process in MinIO. Here are some of the details of our underlying erasure coding behavior.
  Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Tue Aug 15 23:04:20 UTC 2023 - 8K bytes - Viewed (1)
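DESIGN.md notes that MinIO expands ellipsis patterns automatically before choosing erasure sets. As a rough illustration only (a toy expander, not MinIO's actual parser), a recursive expansion of {lo...hi} ranges could look like this:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// ellipsis matches one {lo...hi} numeric range, e.g. {1...4}.
var ellipsis = regexp.MustCompile(`\{(\d+)\.\.\.(\d+)\}`)

// expand replaces the first {lo...hi} range with every value in turn,
// recursing so patterns with several ranges also work.
func expand(pattern string) []string {
	m := ellipsis.FindStringSubmatchIndex(pattern)
	if m == nil {
		return []string{pattern}
	}
	lo, _ := strconv.Atoi(pattern[m[2]:m[3]])
	hi, _ := strconv.Atoi(pattern[m[4]:m[5]])
	var out []string
	for i := lo; i <= hi; i++ {
		out = append(out, expand(pattern[:m[0]]+strconv.Itoa(i)+pattern[m[1]:])...)
	}
	return out
}

func main() {
	fmt.Println(expand("http://host{1...2}/data{1...2}"))
	// [http://host1/data1 http://host1/data2 http://host2/data1 http://host2/data2]
}
```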
- api/go1.10.txt

      pkg math, func RoundToEven(float64) float64
      pkg math/big, const MaxBase = 62
      pkg math/big, method (*Float) Sqrt(*Float) *Float
      pkg math/big, method (*Int) CmpAbs(*Int) int
      pkg math/rand, func Shuffle(int, func(int, int))
      pkg math/rand, method (*Rand) Shuffle(int, func(int, int))
      pkg net, method (*TCPListener) SyscallConn() (syscall.RawConn, error)
      pkg net, method (*UnixListener) SyscallConn() (syscall.RawConn, error)
  Registered: Tue Nov 05 11:13:11 UTC 2024 - Last Modified: Tue Feb 06 05:00:01 UTC 2018 - 30.1K bytes - Viewed (0)
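Among the Go 1.10 additions listed above are the math/rand Shuffle helpers. A short usage example:

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	nums := []int{1, 2, 3, 4, 5}

	// Package-level Shuffle (new in Go 1.10) uses the shared global source.
	rand.Shuffle(len(nums), func(i, j int) { nums[i], nums[j] = nums[j], nums[i] })
	fmt.Println(nums)

	// (*Rand).Shuffle with a fixed seed yields the same permutation every run.
	r := rand.New(rand.NewSource(1))
	r.Shuffle(len(nums), func(i, j int) { nums[i], nums[j] = nums[j], nums[i] })
	fmt.Println(nums)
}
```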
- cmd/iam.go

      sys.initStore(objAPI, etcdClient)
      sys.Unlock()
      retryCtx, cancel := context.WithCancel(ctx)
      // Indicate to our routine to exit cleanly upon return.
      defer cancel()
      r := rand.New(rand.NewSource(time.Now().UnixNano()))
      // Migrate storage format if needed.
      for {
          // Migrate IAM configuration, if necessary.
          if err := saveIAMFormat(retryCtx, sys.store); err != nil {
  Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Tue Oct 29 16:01:48 UTC 2024 - 74.6K bytes - Viewed (0)
- cmd/erasure-sets.go

      // endpoints by reconnecting them and making sure to place them into right position in
      // the set topology, this monitoring happens at a given monitoring interval.
      func (s *erasureSets) monitorAndConnectEndpoints(ctx context.Context, monitorInterval time.Duration) {
          r := rand.New(rand.NewSource(time.Now().UnixNano()))
          time.Sleep(time.Duration(r.Float64() * float64(time.Second)))
  Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Sep 27 10:41:37 UTC 2024 - 37K bytes - Viewed (1)
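The monitorAndConnectEndpoints snippet above sleeps a random fraction of a second before entering its monitoring loop so that many nodes do not probe at the same instant. A hedged sketch of that shape (monitorEndpoints and checkEndpoints are made-up names, not MinIO's API):

```go
package main

import (
	"context"
	"fmt"
	"math/rand"
	"time"
)

// monitorEndpoints starts with a random sub-second delay, then runs the
// supplied check on a fixed interval until the context is cancelled.
func monitorEndpoints(ctx context.Context, interval time.Duration, checkEndpoints func()) {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	time.Sleep(time.Duration(r.Float64() * float64(time.Second))) // initial jitter

	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			checkEndpoints()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	monitorEndpoints(ctx, time.Second, func() { fmt.Println("checking endpoints") })
}
```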
- cmd/xl-storage-disk-id-check.go

          if skipIfSuccessBefore <= 0 {
              skipIfSuccessBefore = globalDriveConfig.GetMaxTimeout()
          }
      }
      t := time.NewTicker(checkEvery)
      defer t.Stop()
      fn := mustGetUUID()
      rng := rand.New(rand.NewSource(time.Now().UnixNano()))
      monitor := func() bool {
          if contextCanceled(ctx) {
              return false
          }
          if p.health.status.Load() != diskHealthOK {
              return true
          }
  Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Sat Oct 26 09:56:26 UTC 2024 - 34.5K bytes - Viewed (0)
- src/bytes/bytes_test.go

      }
      }
      }
      // Shuffle the runes so that they are not in descending order.
      // The sort is deterministic since this is used for benchmarks,
      // which need to be repeatable.
      rr := rand.New(rand.NewSource(1))
      rr.Shuffle(len(rs), func(i, j int) { rs[i], rs[j] = rs[j], rs[i] })
      uchars := string(rs)
      return func(b *testing.B, n int) {
          buf := bmbuf[0:n]
          o := copy(buf, uchars)
  Registered: Tue Nov 05 11:13:11 UTC 2024 - Last Modified: Mon Aug 19 19:09:04 UTC 2024 - 61.2K bytes - Viewed (0)
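The bytes_test.go snippet seeds its *rand.Rand with a constant so the shuffled benchmark input is identical on every run. A small sketch of the same idea in a standalone _test.go file (the input string and BenchmarkIndexByte are invented for illustration, not taken from bytes_test.go):

```go
package bench

import (
	"math/rand"
	"strings"
	"testing"
)

// shuffledInput builds the same pseudo-random string on every run, so
// benchmark timings stay comparable between runs.
func shuffledInput(n int) string {
	rs := []rune("abcdefghijklmnopqrstuvwxyz")
	rr := rand.New(rand.NewSource(1)) // fixed seed => deterministic shuffle
	rr.Shuffle(len(rs), func(i, j int) { rs[i], rs[j] = rs[j], rs[i] })
	return strings.Repeat(string(rs), n/len(rs)+1)[:n]
}

func BenchmarkIndexByte(b *testing.B) {
	s := shuffledInput(1 << 10)
	b.SetBytes(int64(len(s)))
	for i := 0; i < b.N; i++ {
		_ = strings.IndexByte(s, 'Q') // 'Q' is absent, so the whole input is scanned
	}
}
```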
- internal/grid/connection.go

      if err != nil {
          return err
      }
      return wsutil.WriteMessage(conn, c.side, ws.OpBinary, dst)
      }

      func (c *Connection) connect() {
          c.updateState(StateConnecting)
          rng := rand.New(rand.NewSource(time.Now().UnixNano()))
          // Runs until the server is shut down.
          for {
              if c.State() == StateShutdown {
                  return
              }
              dialStarted := time.Now()
              if debugPrint {
  Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Mon Jul 29 18:10:04 UTC 2024 - 46.7K bytes - Viewed (0)
- cmd/notification.go

      }

      // Go calls the given function in a new goroutine.
      //
      // The first call to return a non-nil error will be
      // collected in errs slice and returned by Wait().
      func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, addr xnet.Host) {
          r := rand.New(rand.NewSource(time.Now().UnixNano()))
          g.workers.Take()
          go func() {
              defer g.workers.Give()
              g.errs[index] = NotificationPeerErr{
  Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Mon Sep 09 16:58:30 UTC 2024 - 46.2K bytes - Viewed (0)
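The NotificationGroup doc comment above describes a goroutine group that records each call's error at a fixed index for Wait to return. A stripped-down sketch of that shape, assuming none of MinIO's retry or worker-limit machinery (group, newGroup, and the error messages are hypothetical):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

type group struct {
	wg   sync.WaitGroup
	errs []error
}

func newGroup(n int) *group { return &group{errs: make([]error, n)} }

// Go runs f in a new goroutine and stores its error at index.
func (g *group) Go(f func() error, index int) {
	g.wg.Add(1)
	go func() {
		defer g.wg.Done()
		g.errs[index] = f()
	}()
}

// Wait blocks until all goroutines finish and returns the per-index errors.
func (g *group) Wait() []error {
	g.wg.Wait()
	return g.errs
}

func main() {
	g := newGroup(3)
	for i := 0; i < 3; i++ {
		i := i
		g.Go(func() error {
			if i == 1 {
				return errors.New("peer 1 unreachable")
			}
			return nil
		}, i)
	}
	fmt.Println(g.Wait()) // [<nil> peer 1 unreachable <nil>]
}
```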
- cmd/erasure-server-pool.go

      // so various tasks will be useful
      bootstrapTrace("initAutoHeal", func() {
          initAutoHeal(GlobalContext, z)
      })

      // initialize the object layer.
      defer setObjectLayer(z)

      r := rand.New(rand.NewSource(time.Now().UnixNano()))
      attempt := 1
      for {
          var err error
          bootstrapTrace(fmt.Sprintf("poolMeta.Init: loading pool metadata, attempt: %d", attempt), func() {
  Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Sun Sep 29 22:40:36 UTC 2024 - 89.8K bytes - Viewed (0)