Results 11 - 20 of 38 for SafeClose
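Every hit on this page routes channel closes through xioutil.SafeClose instead of calling close directly. The helper itself is not among these results; judging from the call sites, it is presumably a nil-tolerant wrapper around close, so cleanup and error paths can close unconditionally. A minimal sketch of such a helper (an assumption, not the actual MinIO source):

package xioutil

// SafeClose is a sketch of a nil-tolerant close helper, inferred
// from the call sites below; the real xioutil.SafeClose may differ.
func SafeClose[T any](c chan<- T) {
    if c != nil {
        close(c)
    }
}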
cmd/erasure.go
    }
}
for _, idx := range permutes {
    b := buckets[idx]
    if e := oldCache.find(b.Name); e != nil {
        cache.replace(b.Name, dataUsageRoot, *e)
        bucketCh <- b
    }
}
xioutil.SafeClose(bucketCh)

bucketResults := make(chan dataUsageEntryInfo, len(disks))

// Start async collector/saver.
// This goroutine owns the cache.
var saverWg sync.WaitGroup
saverWg.Add(1)
go func() {
cmd/batch-handlers.go
            Marker:   lastObject,
            Filter:   selectObj,
            AskDisks: walkQuorum,
        }); err != nil {
            cancelCause(err)
            xioutil.SafeClose(walkCh)
            return
        }
        for obj := range prefixWalkCh {
            walkCh <- obj
        }
    }
    xioutil.SafeClose(walkCh)
}()
prevObj := ""
skipReplicate := false
for res := range slowCh {
    if res.Err != nil {
cmd/speedtest.go
}

// Get the max throughput and iops numbers.
func objectSpeedTest(ctx context.Context, opts speedTestOpts) chan madmin.SpeedTestResult {
    ch := make(chan madmin.SpeedTestResult, 1)
    go func() {
        defer xioutil.SafeClose(ch)
        concurrency := opts.concurrencyStart
        if opts.autotune {
            // if we have less drives than concurrency then choose
            // only the concurrency to be number of drives to start
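The speedtest hit is the canonical producer shape: the function returns a channel owned by a single goroutine, and a deferred close tells the receiver's range loop when to stop. A self-contained sketch of that shape (names are illustrative, not from the source):

package main

import "fmt"

// produceResults returns a channel that exactly one goroutine owns;
// the deferred close ends the caller's range loop.
func produceResults(n int) chan int {
    ch := make(chan int, 1)
    go func() {
        defer close(ch)
        for i := 0; i < n; i++ {
            ch <- i * i // stand-in for one test round
        }
    }()
    return ch
}

func main() {
    for res := range produceResults(3) {
        fmt.Println("result:", res)
    }
}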
cmd/erasure-decode.go
if p.offset+p.shardSize > p.shardFileSize {
    p.shardSize = p.shardFileSize - p.offset
}
if p.shardSize == 0 {
    return newBuf, nil
}

readTriggerCh := make(chan bool, len(p.readers))
defer xioutil.SafeClose(readTriggerCh) // close the channel upon return

for i := 0; i < p.dataBlocks; i++ {
    // Setup read triggers for p.dataBlocks number of reads so that it reads in parallel.
    readTriggerCh <- true
}
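The erasure-decode hit uses a buffered trigger channel to cap read parallelism: p.dataBlocks triggers are pre-loaded, and closing the channel on return releases any reader still waiting. A standalone sketch of that trigger pattern (sizes and names are illustrative):

package main

import (
    "fmt"
    "sync"
)

func main() {
    const readers, dataBlocks = 6, 4

    // Trigger channel sized for all potential readers, as in erasure-decode.
    readTriggerCh := make(chan bool, readers)

    var wg sync.WaitGroup
    results := make(chan int, readers)
    for i := 0; i < readers; i++ {
        wg.Add(1)
        go func(idx int) {
            defer wg.Done()
            if ok := <-readTriggerCh; !ok {
                return // a closed channel yields false: no more reads wanted
            }
            results <- idx // stand-in for reading shard idx
        }(i)
    }

    // Pre-load dataBlocks triggers so that many reads start in parallel.
    for i := 0; i < dataBlocks; i++ {
        readTriggerCh <- true
    }
    for i := 0; i < dataBlocks; i++ {
        fmt.Println("read shard", <-results)
    }

    // Closing the trigger channel releases the readers that never got a trigger.
    close(readTriggerCh)
    wg.Wait()
}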
cmd/erasure-server-pool.go
disks, infos, _ := set.getOnlineDisksWithHealingAndInfo(true)
if len(disks) == 0 {
    xioutil.SafeClose(results)
    err := fmt.Errorf("Walk: no online disks found in (set:%d pool:%d) %w", setIdx, poolIdx, errErasureReadQuorum)
    cancelCause(err)
    return err
}
go func() {
    defer xioutil.SafeClose(listOut)
    send := func(e metaCacheEntry) {
        if e.isDir() {
            // Ignore directories.
internal/dsync/drwmutex.go
            // Refreshing is canceled
            return false, ctx.Err()
        }
        if done {
            break
        }
    }

    // We may have some unused results in ch, release them async.
    go func() {
        wg.Wait()
        xioutil.SafeClose(ch)
        for range ch {
        }
    }()

    noQuorum := lockNotFound > len(restClnts)-quorum
    return noQuorum, nil
}

// lock tries to acquire the distributed lock, returning true or false.
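The drwmutex hit is a close-then-drain pattern: once all senders are accounted for via the WaitGroup, the channel is closed and leftovers are drained asynchronously so nothing stays pinned in the buffer. A minimal standalone version (all names illustrative):

package main

import (
    "fmt"
    "sync"
)

func main() {
    ch := make(chan int, 4)
    var senders sync.WaitGroup

    // Four senders drop one result each into the buffered channel.
    for i := 0; i < 4; i++ {
        senders.Add(1)
        go func(v int) {
            defer senders.Done()
            ch <- v
        }(i)
    }

    // Consume only the first result; the rest would otherwise sit
    // in the buffer forever.
    fmt.Println("first result:", <-ch)

    // As in drwmutex: wait for all senders, close, then drain leftovers.
    done := make(chan struct{})
    go func() {
        defer close(done)
        senders.Wait()
        close(ch)
        for range ch {
        } // discard unused results
    }()
    <-done
}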
cmd/xl-storage-disk-id-check.go
if contextCanceled(ctx) {
    xioutil.SafeClose(updates)
    return dataUsageCache{}, ctx.Err()
}
if err := p.checkDiskStale(); err != nil {
    xioutil.SafeClose(updates)
    return dataUsageCache{}, err
}

weSleep := func() bool {
    return scannerIdleMode.Load() == 0
}
cmd/bucket-replication.go
        go p.AddLargeWorker(input, &p.activeLrgWorkers)
    }
    for len(p.lrgworkers) > n {
        worker := p.lrgworkers[len(p.lrgworkers)-1]
        p.lrgworkers = p.lrgworkers[:len(p.lrgworkers)-1]
        xioutil.SafeClose(worker)
    }
}

// ActiveWorkers returns the number of active workers handling replication traffic.
func (p *ReplicationPool) ActiveWorkers() int {
    return int(atomic.LoadInt32(&p.activeWorkers))
}
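The bucket-replication hit scales a worker pool down by closing each surplus worker's input channel; a worker whose range loop sees the close finishes its queued jobs and exits. A sketch of that retire-by-close idea (a toy pool, not the ReplicationPool API):

package main

import (
    "fmt"
    "sync"
)

func main() {
    var wg sync.WaitGroup
    var workers []chan string

    addWorker := func(id int) {
        input := make(chan string, 1)
        workers = append(workers, input)
        wg.Add(1)
        go func() {
            defer wg.Done()
            for job := range input { // exits when input is closed
                fmt.Printf("worker %d: %s\n", id, job)
            }
        }()
    }

    for i := 0; i < 4; i++ {
        addWorker(i)
    }
    workers[0] <- "replicate obj-1"

    // Scale down to n workers by closing the trailing channels,
    // mirroring the loop in bucket-replication.go.
    n := 2
    for len(workers) > n {
        w := workers[len(workers)-1]
        workers = workers[:len(workers)-1]
        close(w) // the worker finishes queued jobs and returns
    }

    for _, w := range workers {
        close(w)
    }
    wg.Wait()
}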
cmd/storage-rest-client.go
    return nil
}

func (client *storageRESTClient) NSScanner(ctx context.Context, cache dataUsageCache, updates chan<- dataUsageEntry, scanMode madmin.HealScanMode, _ func() bool) (dataUsageCache, error) {
    defer xioutil.SafeClose(updates)
    st, err := storageNSScannerRPC.Call(ctx, client.gridConn, &nsScannerOptions{
        DiskID:   *client.diskID.Load(),
        ScanMode: int(scanMode),
        Cache:    &cache,
    })
    if err != nil {
internal/logger/target/kafka/kafka.go
        h.storeCtxCancel()
    }

    // Set logch to nil and close it.
    // This will block all Send operations,
    // and finish the existing ones.
    // All future ones will be discarded.
    h.logChMu.Lock()
    xioutil.SafeClose(h.logCh)
    h.logCh = nil
    h.logChMu.Unlock()

    if h.producer != nil {
        h.producer.Close()
        h.client.Close()
    }

    // Wait for messages to be sent...
    h.wg.Wait()
}
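The kafka target's shutdown closes the log channel and nils it under a mutex, so in-flight Send calls finish while later ones see nil and are discarded. A self-contained sketch of that handshake (the type and methods here are illustrative, not the logger target's API):

package main

import (
    "fmt"
    "sync"
)

// target mirrors the kafka.go shutdown idea: the channel field is
// guarded by a mutex so Send can observe nil once Close has run.
type target struct {
    mu    sync.RWMutex
    logCh chan string
}

func (t *target) Send(msg string) error {
    t.mu.RLock()
    defer t.mu.RUnlock()
    if t.logCh == nil {
        return fmt.Errorf("target closed, discarding %q", msg)
    }
    t.logCh <- msg // a real target needs a consumer so this cannot block
    return nil
}

func (t *target) Close() {
    t.mu.Lock() // waits for in-flight Sends holding the read lock
    if t.logCh != nil {
        close(t.logCh)
        t.logCh = nil
    }
    t.mu.Unlock()
}

func main() {
    t := &target{logCh: make(chan string, 1)}
    fmt.Println(t.Send("hello"))       // <nil>
    t.Close()
    fmt.Println(t.Send("after close")) // discarded
}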