Results 41 - 50 of 127 for coroutines (0.05 sec)

  1. cmd/signals.go

    		}
    
    		os.Exit(1)
    	}
    
    	stopProcess := func() bool {
    		shutdownHealMRFWithTimeout() // this can sometimes take a while; it needs to run
    		// before stopping S3 operations
    
    		// send signal to various go-routines that they need to quit.
    		cancelGlobalContext()
    
    		if httpServer := newHTTPServerFn(); httpServer != nil {
    			if err := httpServer.Shutdown(); err != nil && !errors.Is(err, http.ErrServerClosed) {
    Registered: Sun Dec 28 19:28:13 UTC 2025
    - Last Modified: Wed Sep 04 17:02:39 UTC 2024
    - 3.2K bytes
    - Viewed (0)
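
    The excerpt only shows the middle of stopProcess. A minimal, self-contained sketch of the same shutdown ordering (signal received, cancel the shared context so goroutines wind down, then shut down the HTTP server with a deadline), using only the standard library; the address and timeout below are illustrative, not values from the MinIO code:

    package main

    import (
    	"context"
    	"errors"
    	"log"
    	"net/http"
    	"os"
    	"os/signal"
    	"syscall"
    	"time"
    )

    func main() {
    	// Cancelled when SIGINT/SIGTERM arrives; plays the role of the global context.
    	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
    	defer stop()

    	srv := &http.Server{Addr: ":8080"}
    	go func() {
    		if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
    			log.Fatal(err)
    		}
    	}()

    	<-ctx.Done() // signal received: goroutines watching ctx start winding down

    	// Give in-flight requests a bounded time to finish, mirroring httpServer.Shutdown().
    	shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    	defer cancel()
    	if err := srv.Shutdown(shutdownCtx); err != nil {
    		log.Printf("shutdown: %v", err)
    	}
    }
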
  2. internal/lru/lru.go

    	for i := 0; i < numBuckets; i++ {
    		res.buckets[i] = bucket[K, V]{entries: make(map[K]*Entry[K, V])}
    	}
    
    	// enable deleteExpired() running in separate goroutine for cache with non-zero TTL
    	//
    	// Important: the done channel is never closed, so the deleteExpired() goroutine never exits;
    	// adding a way to close it is planned for a release after v2.
    	if res.ttl != noEvictionTTL {
    		go func(done <-chan struct{}) {
    Registered: Sun Dec 28 09:35:17 UTC 2025
    - Last Modified: Fri Apr 25 08:22:26 UTC 2025
    - 12.5K bytes
    - Viewed (0)
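
    The comment describes a background goroutine that evicts expired entries for caches built with a non-zero TTL. A rough sketch of that pattern, with a stand-in cache type and a hypothetical startEviction helper rather than the lru package's real API:

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    // cache is a stand-in for the LRU in the excerpt; only the pieces the
    // eviction goroutine needs are shown.
    type cache struct {
    	mu      sync.Mutex
    	expires map[string]time.Time
    }

    func (c *cache) deleteExpired() {
    	c.mu.Lock()
    	defer c.mu.Unlock()
    	now := time.Now()
    	for k, exp := range c.expires {
    		if now.After(exp) {
    			delete(c.expires, k)
    		}
    	}
    }

    // startEviction launches the background goroutine the comment describes.
    // As the excerpt notes, nothing ever closes done in v2, so the goroutine
    // lives for the lifetime of the process.
    func startEviction(c *cache, ttl time.Duration, done <-chan struct{}) {
    	go func() {
    		ticker := time.NewTicker(ttl)
    		defer ticker.Stop()
    		for {
    			select {
    			case <-done:
    				return
    			case <-ticker.C:
    				c.deleteExpired()
    			}
    		}
    	}()
    }

    func main() {
    	c := &cache{expires: map[string]time.Time{"k": time.Now().Add(50 * time.Millisecond)}}
    	done := make(chan struct{}) // never closed, as in the excerpt
    	startEviction(c, 25*time.Millisecond, done)
    	time.Sleep(100 * time.Millisecond) // give the goroutine a chance to evict "k"
    	c.mu.Lock()
    	fmt.Println(len(c.expires)) // 0
    	c.mu.Unlock()
    }
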
  3. src/builtin/builtin.go

    // panicking goroutine. Executing a call to recover inside a deferred
    // function (but not any function called by it) stops the panicking sequence
    // by restoring normal execution and retrieves the error value passed to the
    // call of panic. If recover is called outside the deferred function it will
    // not stop a panicking sequence. In this case, or when the goroutine is not
    // panicking, recover returns nil.
    //
    Registered: Tue Dec 30 11:13:12 UTC 2025
    - Last Modified: Mon Dec 30 23:59:23 UTC 2024
    - 12.8K bytes
    - Viewed (0)
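
    The doc comment spells out recover's contract: it only stops a panic when called directly from a deferred function in the panicking goroutine, and it returns the value passed to panic. A small self-contained illustration:

    package main

    import "fmt"

    // safeDivide demonstrates the behavior described above: recover, called
    // directly inside a deferred function, stops the panicking sequence and
    // hands back the value panic was called with.
    func safeDivide(a, b int) (result int, err error) {
    	defer func() {
    		if r := recover(); r != nil { // r is the value passed to panic
    			err = fmt.Errorf("recovered: %v", r)
    		}
    	}()
    	return a / b, nil // b == 0 panics with a runtime error
    }

    func main() {
    	if _, err := safeDivide(1, 0); err != nil {
    		fmt.Println(err) // recovered: runtime error: integer divide by zero
    	}
    }
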
  4. cmd/shared-lock.go

    	ctx, cancel := context.WithCancel(context.Background())
    	go func() {
    		select {
    		case <-ctx1.Done():
    		case <-ctx2.Done():
    		// The lock acquirer has decided to cancel; exit this goroutine
    		case <-ctx.Done():
    		}
    
    		cancel()
    	}()
    	return ctx, cancel
    }
    
    func (ld sharedLock) GetLock(ctx context.Context) (context.Context, context.CancelFunc) {
    	l := <-ld.lockContext
    Registered: Sun Dec 28 19:28:13 UTC 2025
    - Last Modified: Mon Feb 13 09:26:38 UTC 2023
    - 2.3K bytes
    - Viewed (0)
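
    The goroutine in the excerpt merges two parent contexts into one: whichever parent finishes first cancels the derived context. The same pattern as a standalone helper, a sketch of the technique rather than the sharedLock code itself (mergeContexts is a made-up name):

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    // mergeContexts returns a context that is cancelled as soon as either parent
    // is done, mirroring the goroutine in the excerpt.
    func mergeContexts(ctx1, ctx2 context.Context) (context.Context, context.CancelFunc) {
    	ctx, cancel := context.WithCancel(context.Background())
    	go func() {
    		select {
    		case <-ctx1.Done():
    		case <-ctx2.Done():
    		case <-ctx.Done(): // caller cancelled; exit this goroutine
    		}
    		cancel()
    	}()
    	return ctx, cancel
    }

    func main() {
    	parent1, cancel1 := context.WithCancel(context.Background())
    	parent2 := context.Background()

    	merged, cancel := mergeContexts(parent1, parent2)
    	defer cancel()

    	go func() {
    		time.Sleep(10 * time.Millisecond)
    		cancel1() // cancelling either parent cancels the merged context
    	}()

    	<-merged.Done()
    	fmt.Println("merged context done:", merged.Err())
    }
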
  5. internal/dsync/drwmutex_test.go

    	}
    	// Wait for all parallel RLock()s to succeed.
    	for range numReaders {
    		<-clocked
    	}
    	for range numReaders {
    		cunlock <- true
    	}
    	// Wait for the goroutines to finish.
    	for range numReaders {
    		<-cdone
    	}
    }
    
    // Borrowed from rwmutex_test.go
    func TestParallelReaders(t *testing.T) {
    	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
    Registered: Sun Dec 28 19:28:13 UTC 2025
    - Last Modified: Sun Sep 28 20:59:21 UTC 2025
    - 9.4K bytes
    - Viewed (0)
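
    The test coordinates its readers with three channels: each reader signals on clocked once it holds the read lock, blocks on cunlock until told to release, and reports on cdone. The same choreography applied to a plain sync.RWMutex, as a sketch of the pattern rather than the dsync test:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // reader signals on clocked once it holds the read lock, waits for permission
    // on cunlock, then releases the lock and signals cdone.
    func reader(m *sync.RWMutex, clocked, cunlock, cdone chan bool) {
    	m.RLock()
    	clocked <- true
    	<-cunlock
    	m.RUnlock()
    	cdone <- true
    }

    func main() {
    	const numReaders = 4
    	var m sync.RWMutex
    	clocked := make(chan bool)
    	cunlock := make(chan bool)
    	cdone := make(chan bool)

    	for range numReaders {
    		go reader(&m, clocked, cunlock, cdone)
    	}
    	// Wait for all parallel RLock()s to succeed.
    	for range numReaders {
    		<-clocked
    	}
    	for range numReaders {
    		cunlock <- true
    	}
    	// Wait for the goroutines to finish.
    	for range numReaders {
    		<-cdone
    	}
    	fmt.Println("all readers held the lock concurrently")
    }
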
  6. src/arena/arena.go

    // it is no longer referenced, so it must be kept alive (see runtime.KeepAlive)
    // until any memory allocated from it is no longer needed.
    //
    // An Arena must never be used concurrently by multiple goroutines.
    type Arena struct {
    	a unsafe.Pointer
    }
    
    // NewArena allocates a new arena.
    func NewArena() *Arena {
    	return &Arena{a: runtime_arena_newArena()}
    }
    
    Registered: Tue Dec 30 11:13:12 UTC 2025
    - Last Modified: Wed Oct 12 20:23:36 UTC 2022
    - 4.3K bytes
    - Viewed (0)
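
    arena is an experimental package and only builds with GOEXPERIMENT=arenas, so treat this as a sketch of the keep-alive rule the doc comment states rather than settled API: the arena must stay reachable for as long as memory allocated from it is in use, and it must not be shared across goroutines.

    //go:build goexperiment.arenas

    package main

    import (
    	"arena"
    	"fmt"
    	"runtime"
    )

    type point struct{ x, y int }

    func main() {
    	a := arena.NewArena()

    	// Allocate from the arena; p's memory is only valid while a is alive and
    	// not yet freed, so a must stay reachable for as long as p is used.
    	p := arena.New[point](a)
    	p.x, p.y = 3, 4
    	fmt.Println(p.x + p.y)

    	// Per the doc comment, keep the arena alive until the memory it handed
    	// out is no longer needed, then free it explicitly.
    	runtime.KeepAlive(a)
    	a.Free()
    }
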
  7. cmd/notification.go

    type NotificationPeerErr struct {
    	Host xnet.Host // Remote host on which the rpc call was initiated
    	Err  error     // Error returned by the remote peer for an rpc call
    }
    
    // A NotificationGroup is a collection of goroutines working on subtasks that are part of
    // the same overall task.
    //
    // A zero NotificationGroup is valid and does not cancel on error.
    type NotificationGroup struct {
    	workers    *workers.Workers
    Registered: Sun Dec 28 19:28:13 UTC 2025
    - Last Modified: Sun Sep 28 20:59:21 UTC 2025
    - 46K bytes
    - Viewed (0)
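
    The doc comment describes the same shape as errgroup-style fan-out: one goroutine per peer, results collected per host. A toy version of that idea (not MinIO's NotificationGroup, which wraps an internal workers pool); peerErr, group, and the host names below are illustrative:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // peerErr mirrors the shape of NotificationPeerErr above: which host a call
    // went to and what it returned. Host is a plain string here for simplicity.
    type peerErr struct {
    	Host string
    	Err  error
    }

    // group is a toy take on "a collection of goroutines working on subtasks that
    // are part of the same overall task": fan a function out to every peer and
    // gather one result slot per peer.
    type group struct {
    	wg   sync.WaitGroup
    	errs []peerErr
    }

    func (g *group) Go(i int, host string, fn func(string) error) {
    	g.wg.Add(1)
    	go func() {
    		defer g.wg.Done()
    		g.errs[i] = peerErr{Host: host, Err: fn(host)} // each goroutine owns its own slot
    	}()
    }

    func (g *group) Wait() []peerErr {
    	g.wg.Wait()
    	return g.errs
    }

    func main() {
    	hosts := []string{"peer-1:9000", "peer-2:9000", "peer-3:9000"}
    	g := &group{errs: make([]peerErr, len(hosts))}
    	for i, h := range hosts {
    		g.Go(i, h, func(host string) error {
    			// In MinIO this would be an RPC to the remote peer.
    			if host == "peer-2:9000" {
    				return fmt.Errorf("connection refused")
    			}
    			return nil
    		})
    	}
    	for _, e := range g.Wait() {
    		fmt.Printf("%s -> %v\n", e.Host, e.Err)
    	}
    }
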
  8. internal/s3select/progress.go

    	closedMu sync.Mutex
    	closer   io.ReadCloser
    	closed   bool
    }
    
    func (pr *progressReader) Read(p []byte) (n int, err error) {
    	// This ensures that Close will block until Read has completed.
    	// This allows another goroutine to close the reader.
    	pr.closedMu.Lock()
    	defer pr.closedMu.Unlock()
    	if pr.closed {
    		return 0, errors.New("progressReader: read after Close")
    	}
    	return pr.processedReader.Read(p)
    }
    
    Registered: Sun Dec 28 19:28:13 UTC 2025
    - Last Modified: Sun Sep 22 00:33:43 UTC 2024
    - 4.3K bytes
    - Viewed (0)
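
    Read holds closedMu for its whole duration, so a Close that takes the same mutex cannot complete until any in-flight Read has returned. A sketch of what that counterpart Close might look like, reconstructing the struct from the excerpt (processedReader's exact type is assumed); this is not necessarily the actual s3select code:

    package main

    import (
    	"io"
    	"strings"
    	"sync"
    )

    // progressReader reconstructed from the excerpt; processedReader is assumed
    // to be an io.Reader layered over the underlying closer.
    type progressReader struct {
    	processedReader io.Reader
    	closedMu        sync.Mutex
    	closer          io.ReadCloser
    	closed          bool
    }

    // Close takes the same mutex as Read, so it blocks until any in-flight Read
    // returns; afterwards the closed flag makes later Reads fail fast.
    func (pr *progressReader) Close() error {
    	pr.closedMu.Lock()
    	defer pr.closedMu.Unlock()
    	if pr.closed {
    		return nil
    	}
    	pr.closed = true
    	return pr.closer.Close()
    }

    func main() {
    	rc := io.NopCloser(strings.NewReader("data"))
    	pr := &progressReader{processedReader: rc, closer: rc}
    	_ = pr.Close()
    	_ = pr.Close() // idempotent: the second call is a no-op
    }
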
  9. cmd/namespace-lock_test.go

    		// lk2
    		lk2ch := make(chan struct{})
    		go func() {
    			defer close(lk2ch)
    			nsLk.lock(ctx, "volume", "path", "source", "opsID", false, 1*time.Millisecond)
    		}()
    		time.Sleep(1 * time.Millisecond) // wait for goroutine to advance; ref=2
    
    		// Unlock the 1st lock; ref=1 after this line
    		nsLk.unlock("volume", "path", false)
    
    		// Taking another lockMapMutex here allows queuing up additional lockers. This should
    Registered: Sun Dec 28 19:28:13 UTC 2025
    - Last Modified: Fri Aug 29 02:39:48 UTC 2025
    - 3K bytes
    - Viewed (0)
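
    The test acquires the namespace lock from a helper goroutine with a short timeout and uses a closed channel to learn when the attempt has finished. The same try-with-timeout idea on a plain sync.Mutex, purely as an illustration (tryLockWithin is a made-up helper, and nsLk.lock's reference-counting semantics are richer than TryLock):

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    // tryLockWithin keeps trying to take mu from a helper goroutine until d has
    // elapsed, reporting whether it succeeded.
    func tryLockWithin(mu *sync.Mutex, d time.Duration) bool {
    	got := make(chan bool, 1)
    	go func() {
    		deadline := time.Now().Add(d)
    		for {
    			if mu.TryLock() {
    				got <- true
    				return
    			}
    			if time.Now().After(deadline) {
    				got <- false
    				return
    			}
    			time.Sleep(time.Millisecond)
    		}
    	}()
    	return <-got
    }

    func main() {
    	var mu sync.Mutex
    	mu.Lock() // held by the first locker, like lk1 in the test

    	// A second locker gives up after a short timeout, like lk2.
    	fmt.Println("while held:", tryLockWithin(&mu, 5*time.Millisecond)) // false

    	mu.Unlock()
    	fmt.Println("after unlock:", tryLockWithin(&mu, 5*time.Millisecond)) // true
    }
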
  10. internal/lsync/lrwmutex_test.go

    	}
    	// Wait for all parallel RLock()s to succeed.
    	for range numReaders {
    		<-clocked
    	}
    	for range numReaders {
    		cunlock <- true
    	}
    	// Wait for the goroutines to finish.
    	for range numReaders {
    		<-cdone
    	}
    }
    
    // Borrowed from rwmutex_test.go
    func TestParallelReaders(t *testing.T) {
    	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
    Registered: Sun Dec 28 19:28:13 UTC 2025
    - Last Modified: Sun Sep 28 20:59:21 UTC 2025
    - 7.8K bytes
    - Viewed (0)