- Sort Score
- Results per page: 10
- Languages All
Results 31 - 40 of 227 for lock1 (0.05 sec)
-
pkg/kubelet/pleg/generic.go
podsToReinspect map[types.UID]*kubecontainer.Pod // Stop the Generic PLEG by closing the channel. stopCh chan struct{} // Locks the relisting of the Generic PLEG relistLock sync.Mutex // Indicates if the Generic PLEG is running or not isRunning bool // Locks the start/stop operation of Generic PLEG runningMu sync.Mutex // Indicates relisting related parameters relistDuration *RelistDuration
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Jun 04 06:25:43 UTC 2024 - 19.3K bytes - Viewed (0) -
src/runtime/iface.go
// This is by far the most common case, so do it without locks. // Use atomic to ensure we see any previous writes done by the thread // that updates the itabTable field (with atomic.Storep in itabAdd). t := (*itabTableType)(atomic.Loadp(unsafe.Pointer(&itabTable))) if m = t.find(inter, typ); m != nil { goto finish } // Not found. Grab the lock and try again. lock(&itabLock) if m = itabTable.find(inter, typ); m != nil {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 22.5K bytes - Viewed (0) -
plugin/pkg/auth/authorizer/node/graph.go
// svcacct -> pod func (g *Graph) AddPod(pod *corev1.Pod) { start := time.Now() defer func() { graphActionsDuration.WithLabelValues("AddPod").Observe(time.Since(start).Seconds()) }() g.lock.Lock() defer g.lock.Unlock() g.deleteVertex_locked(podVertexType, pod.Namespace, pod.Name) podVertex := g.getOrCreateVertex_locked(podVertexType, pod.Namespace, pod.Name)
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu Mar 07 21:22:55 UTC 2024 - 17.5K bytes - Viewed (0) -
staging/src/k8s.io/apiserver/pkg/util/proxy/streamtunnel.go
u.lock.RLock() defer u.lock.RUnlock() if u.conn != nil { return u.conn.Read(b) } if u.err != nil { return 0, u.err } // return empty read without blocking until we are initialized return 0, nil } func (u *tunnelingWebsocketUpgraderConn) Write(b []byte) (n int, err error) { u.lock.RLock() defer u.lock.RUnlock() if u.conn != nil {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon Mar 04 19:10:30 UTC 2024 - 14.7K bytes - Viewed (0) -
platforms/core-runtime/launcher/src/main/java/org/gradle/launcher/daemon/server/DaemonStateCoordinator.java
} } finally { lock.unlock(); } } private void onCommandFailed(Throwable failure) { lock.lock(); try { result = failure; condition.signalAll(); } finally { lock.unlock(); } } private void onCommandSuccessful() { lock.lock(); try { result = this;
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Wed May 29 06:47:38 UTC 2024 - 17.1K bytes - Viewed (0) -
internal/bucket/object/lock/lock_test.go
"x-amz-object-lock-mode": "governance", "x-amz-object-lock-retain-until-date": "2020-02-01", }, expected: map[string]string{}, filterRetention: true, filterLegalHold: true, }, { metadata: map[string]string{ "x-amz-object-lock-legal-hold": "on", "x-amz-object-lock-mode": "governance",
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Thu Feb 22 06:26:06 UTC 2024 - 17.1K bytes - Viewed (0) -
src/runtime/netpoll.go
// pd can't be shared here, but lock anyhow because // that's what publishInfo documents. lock(&pd.lock) // Increment the fdseq field, so that any currently // running netpoll calls will not mark pd as ready. fdseq := pd.fdseq.Load() fdseq = (fdseq + 1) & (1<<taggedPointerBits - 1) pd.fdseq.Store(fdseq) pd.publishInfo() unlock(&pd.lock) lock(&c.lock) pd.link = c.first c.first = pd
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 19:57:43 UTC 2024 - 20.7K bytes - Viewed (0) -
src/syscall/exec_plan9.go
// with [ForkExec]/[StartProcess] to wait for a // running process to exit. func WaitProcess(pid int, w *Waitmsg) (err error) { procs.Lock() ch := procs.waits[pid] procs.Unlock() var wmsg *waitErr if ch != nil { wmsg = <-ch procs.Lock() if procs.waits[pid] == ch { delete(procs.waits, pid) } procs.Unlock() } if wmsg == nil { // ch was missing or ch is closed
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Feb 26 21:03:59 UTC 2024 - 13.3K bytes - Viewed (0) -
src/runtime/mgcwork.go
} } // Record that this may acquire the wbufSpans or heap lock to // allocate a workbuf. lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans) lockWithRankMayAcquire(&mheap_.lock, lockRankMheap) if b == nil { // Allocate more workbufs. var s *mspan if work.wbufSpans.free.first != nil { lock(&work.wbufSpans.lock) s = work.wbufSpans.free.first if s != nil {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 12.9K bytes - Viewed (0) -
internal/ringbuffer/ring_buffer.go
// ErrIsNotEmpty is returned when the buffer is not empty and not blocking. ErrIsNotEmpty = errors.New("ringbuffer is not empty") // ErrAcquireLock is returned when the lock is not acquired on Try operations. ErrAcquireLock = errors.New("unable to acquire lock") // ErrWriteOnClosed is returned when write on a closed ringbuffer. ErrWriteOnClosed = errors.New("write on closed ringbuffer") )
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Wed May 15 00:11:04 UTC 2024 - 13.3K bytes - Viewed (0)