Results 11 - 20 of 3,332 for "acquired" (3.84 sec)

  1. src/sync/rwmutex.go

    	// Wait for active readers.
    	if r != 0 && rw.readerWait.Add(r) != 0 {
    		runtime_SemacquireRWMutex(&rw.writerSem, false, 0)
    	}
    	if race.Enabled {
    		race.Enable()
    		race.Acquire(unsafe.Pointer(&rw.readerSem))
    		race.Acquire(unsafe.Pointer(&rw.writerSem))
    	}
    }
    
    // TryLock tries to lock rw for writing and reports whether it succeeded.
    //
    // Note that while correct uses of TryLock do exist, they are rare,

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Jun 07 21:14:51 UTC 2024
    - 7.2K bytes
    - Viewed (0)
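
    The excerpt's note that correct uses of TryLock are rare can be illustrated with a small sketch against the exported sync.RWMutex API; the helper name tryUpdate is made up for illustration:

    import "sync"

    var rw sync.RWMutex

    // tryUpdate runs update only if the write lock can be taken immediately;
    // otherwise it reports false instead of blocking behind readers or writers.
    func tryUpdate(update func()) bool {
        if !rw.TryLock() {
            return false
        }
        defer rw.Unlock()
        update()
        return true
    }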
  2. src/cmd/vendor/golang.org/x/telemetry/internal/upload/reports.go

    	return "", nil
    }
    
    // exclusiveWrite attempts to create filename exclusively, and if successful,
    // writes content to the resulting file handle.
    //
    // It returns a boolean indicating whether the exclusive handle was acquired,
    // and an error indicating whether the operation succeeded.
    // If the file already exists, exclusiveWrite returns (false, nil).
    func exclusiveWrite(filename string, content []byte) (_ bool, rerr error) {

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Jun 04 14:52:56 UTC 2024
    - 10.3K bytes
    - Viewed (0)
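
    As a rough illustration of the contract described in the comment (a boolean for whether the exclusive handle was acquired, and (false, nil) when the file already exists), here is a minimal sketch using only the standard library; the real exclusiveWrite in x/telemetry handles more edge cases:

    import (
        "errors"
        "io/fs"
        "os"
    )

    // exclusiveWriteSketch creates filename with O_EXCL so the create fails if
    // the file already exists, mirroring the documented (false, nil) behaviour.
    func exclusiveWriteSketch(filename string, content []byte) (bool, error) {
        f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o666)
        if errors.Is(err, fs.ErrExist) {
            return false, nil // someone else already created the file
        }
        if err != nil {
            return false, err
        }
        defer f.Close()
        _, err = f.Write(content)
        return true, err
    }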
  3. src/cmd/vendor/golang.org/x/telemetry/internal/counter/counter.go

    		switch {
    		case !state.locked() && state.havePtr():
    			if !c.state.update(&state, state.incReader()) {
    				continue
    			}
    			// Counter unlocked or counter shared; has an initialized count pointer; acquired shared lock.
    			if c.ptr.count == nil {
    				for !c.state.update(&state, state.addExtra(uint64(n))) {
    					// keep trying - we already took the reader lock
    					state = c.state.load()
    				}

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Jun 04 16:19:04 UTC 2024
    - 10.3K bytes
    - Viewed (0)
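
    The retry loop in the excerpt ("keep trying - we already took the reader lock") follows the usual compare-and-swap pattern. A minimal sketch of that pattern with a plain atomic.Uint64; the real counter packs lock bits and a count into one state word:

    import "sync/atomic"

    var state atomic.Uint64

    // addExtra keeps recomputing the new value from a fresh load until the
    // compare-and-swap installs it, i.e. until no other writer raced with us.
    func addExtra(n uint64) {
        for {
            old := state.Load()
            if state.CompareAndSwap(old, old+n) {
                return
            }
        }
    }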
  4. src/runtime/tracestack.go

    		gp = mp.curg
    	}
    
    	// Double-check that we own the stack we're about to trace.
    	if debug.traceCheckStackOwnership != 0 && gp != nil {
    		status := readgstatus(gp)
    		// If the scan bit is set, assume we're the ones that acquired it.
    		if status&_Gscan == 0 {
    			// Use the trace status to check this. There are a number of cases
    			// where a running goroutine might be in _Gwaiting, and these cases

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue May 21 14:38:56 UTC 2024
    - 11K bytes
    - Viewed (0)
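
    The ownership check in the excerpt is a plain bit test on the goroutine status word. A sketch of the shape of that test, with a made-up constant standing in for the runtime's internal _Gscan bit:

    // gScanBit is a placeholder value for illustration; the runtime's _Gscan
    // constant and readgstatus are internal and not reproduced here.
    const gScanBit uint32 = 0x1000

    // ownsScannedStack mirrors the excerpt's logic: if the scan bit is set,
    // the tracer assumes it is the one that acquired stack ownership.
    func ownsScannedStack(status uint32) bool {
        return status&gScanBit != 0
    }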
  5. src/runtime/traceruntime.go

    //
    //go:nosplit
    func traceAcquireEnabled() traceLocker {
    	// Any time we acquire a traceLocker, we may flush a trace buffer. But
    	// buffer flushes are rare. Record the lock edge even if it doesn't happen
    	// this time.
    	lockRankMayTraceFlush()
    
    	// Prevent preemption.
    	mp := acquirem()
    
    	// Acquire the trace seqlock. This prevents traceAdvance from moving forward

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 25.7K bytes
    - Viewed (0)
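
    The "trace seqlock" mentioned in the excerpt is a sequence lock: a writer bumps a counter to odd before publishing and back to even afterwards, and readers retry if they observe an odd or changed counter. A simplified single-writer sketch using atomics, not the runtime's actual implementation:

    import "sync/atomic"

    type seqValue struct {
        seq  atomic.Uint64 // odd while a write is in progress
        data atomic.Uint64
    }

    // write assumes writers are already serialized (e.g. by a mutex).
    func (s *seqValue) write(v uint64) {
        s.seq.Add(1) // odd: tell readers a write has started
        s.data.Store(v)
        s.seq.Add(1) // even: the value is stable again
    }

    // read retries until it sees the same even sequence number before and
    // after loading the value, i.e. no write overlapped the read.
    func (s *seqValue) read() uint64 {
        for {
            before := s.seq.Load()
            if before%2 != 0 {
                continue
            }
            v := s.data.Load()
            if s.seq.Load() == before {
                return v
            }
        }
    }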
  6. pkg/kubelet/util/manager/watch_based_manager.go

    }
    
    func (c *objectCache) startRecycleIdleWatch() {
    	c.lock.Lock()
    	defer c.lock.Unlock()
    
    	for key, item := range c.items {
    		if item.stopIfIdle(c.clock.Now(), c.maxIdleTime) {
    			klog.V(4).InfoS("Not acquired for long time, Stopped watching for changes", "objectKey", key, "maxIdleTime", c.maxIdleTime)
    		}
    	}
    }
    
    func (c *objectCache) shutdownWhenStopped(stopCh <-chan struct{}) {
    	<-stopCh
    
    	c.lock.Lock()

    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Tue Jun 04 06:25:43 UTC 2024
    - 11.6K bytes
    - Viewed (0)
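
    The loop above stops watches whose objects have not been acquired (accessed) recently. A sketch of what an item-level stopIfIdle check could look like, with hypothetical fields; the real cache item type lives in the kubelet package:

    import (
        "sync"
        "time"
    )

    type cacheItem struct {
        mu         sync.Mutex
        lastAccess time.Time
        stopWatch  func()
    }

    // stopIfIdle stops the underlying watch when the item has not been accessed
    // within maxIdleTime, and reports whether it did so.
    func (i *cacheItem) stopIfIdle(now time.Time, maxIdleTime time.Duration) bool {
        i.mu.Lock()
        defer i.mu.Unlock()
        if now.Sub(i.lastAccess) > maxIdleTime {
            i.stopWatch()
            return true
        }
        return false
    }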
  7. staging/src/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go

    func (w *watchCache) List() []interface{} {
    	return w.store.List()
    }
    
    // waitUntilFreshAndBlock waits until cache is at least as fresh as given <resourceVersion>.
    // NOTE: This function acquired lock and doesn't release it.
    // You HAVE TO explicitly call w.RUnlock() after this function.
    func (w *watchCache) waitUntilFreshAndBlock(ctx context.Context, resourceVersion uint64) error {
    	startTime := w.clock.Now()

    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Tue Jun 11 10:20:57 UTC 2024
    - 26.2K bytes
    - Viewed (0)
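
    The comment spells out an unusual contract: the function returns with the read lock still held and the caller must call RUnlock itself. A minimal sketch of that calling pattern with hypothetical types; the real watch cache callers defer w.RUnlock immediately after the call:

    import "sync"

    type cache struct {
        mu    sync.RWMutex
        items []string
    }

    // waitUntilFreshAndBlock returns with the read lock held in every case.
    func (w *cache) waitUntilFreshAndBlock(resourceVersion uint64) error {
        w.mu.RLock()
        // ... block here until the cache is at least as fresh as resourceVersion ...
        return nil
    }

    func (w *cache) listAtVersion(resourceVersion uint64) ([]string, error) {
        err := w.waitUntilFreshAndBlock(resourceVersion)
        defer w.mu.RUnlock() // required: the callee intentionally left the lock held
        if err != nil {
            return nil, err
        }
        return append([]string(nil), w.items...), nil
    }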
  8. pkg/scheduler/internal/queue/scheduling_queue.go

    		p.movePodsToActiveOrBackoffQueue(logger, p.getUnschedulablePodsWithMatchingAffinityTerm(logger, newPod), AssignedPodUpdate, oldPod, newPod)
    	}
    	p.lock.Unlock()
    }
    
    // NOTE: this function assumes a lock has been acquired in the caller.
    // moveAllToActiveOrBackoffQueue moves all pods from unschedulablePods to activeQ or backoffQ.
    // This function adds all pods and then signals the condition variable to ensure that

    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed Jun 12 13:26:09 UTC 2024
    - 61.4K bytes
    - Viewed (0)
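
    The "lock has been acquired in the caller" note reflects a common Go convention: an exported method takes the lock and delegates to an unexported helper that documents it must run with the lock held. A sketch with made-up names:

    import "sync"

    type podQueue struct {
        mu            sync.Mutex
        unschedulable []string
        active        []string
    }

    func (q *podQueue) MoveAllToActive() {
        q.mu.Lock()
        defer q.mu.Unlock()
        q.moveAllToActiveLocked()
    }

    // moveAllToActiveLocked assumes q.mu has been acquired by the caller.
    func (q *podQueue) moveAllToActiveLocked() {
        q.active = append(q.active, q.unschedulable...)
        q.unschedulable = q.unschedulable[:0]
    }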
  9. internal/ringbuffer/ring_buffer.go

    	// ErrIsNotEmpty is returned when the buffer is not empty and not blocking.
    	ErrIsNotEmpty = errors.New("ringbuffer is not empty")
    
    	// ErrAcquireLock is returned when the lock is not acquired on Try operations.
    	ErrAcquireLock = errors.New("unable to acquire lock")
    
    	// ErrWriteOnClosed is returned when write on a closed ringbuffer.
    	ErrWriteOnClosed = errors.New("write on closed ringbuffer")
    )
    
    - Registered: Sun Jun 16 00:44:34 UTC 2024
    - Last Modified: Wed May 15 00:11:04 UTC 2024
    - 13.3K bytes
    - Viewed (0)
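
    A sketch of how a Try operation could surface ErrAcquireLock, using sync.Mutex.TryLock; the buffer type below is made up, and the real ring buffer's internals are more involved:

    import (
        "errors"
        "sync"
    )

    var ErrAcquireLock = errors.New("unable to acquire lock")

    type lockedBuffer struct {
        mu   sync.Mutex
        data []byte
    }

    // TryWrite appends p without blocking; if the lock is currently held it
    // returns ErrAcquireLock instead of waiting for it.
    func (b *lockedBuffer) TryWrite(p []byte) (int, error) {
        if !b.mu.TryLock() {
            return 0, ErrAcquireLock
        }
        defer b.mu.Unlock()
        b.data = append(b.data, p...)
        return len(p), nil
    }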
  10. src/testing/benchmark.go

    type PB struct {
    	globalN *atomic.Uint64 // shared between all worker goroutines iteration counter
    	grain   uint64         // acquire that many iterations from globalN at once
    	cache   uint64         // local cache of acquired iterations
    	bN      uint64         // total number of iterations to execute (b.N)
    }
    
    // Next reports whether there are more iterations to execute.

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 01:00:11 UTC 2024
    - 23.9K bytes
    - Viewed (0)
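
    The PB counter above is what b.RunParallel hands to each worker goroutine; iterations are acquired from the shared counter in chunks of grain. A minimal benchmark showing the standard usage, where the locked section is only a stand-in body:

    import (
        "sync"
        "testing"
    )

    func BenchmarkAcquire(b *testing.B) {
        var mu sync.Mutex
        b.RunParallel(func(pb *testing.PB) {
            // Each worker draws iterations from the shared counter via pb.Next
            // until the total of b.N iterations has been handed out.
            for pb.Next() {
                mu.Lock()
                mu.Unlock()
            }
        })
    }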