- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 247 for lifo (0.05 sec)
-
src/sync/runtime.go
// the reason for waiting in a backtrace, and is used to compute some metrics. // Otherwise they're functionally identical. func runtime_SemacquireMutex(s *uint32, lifo bool, skipframes int) func runtime_SemacquireRWMutexR(s *uint32, lifo bool, skipframes int) func runtime_SemacquireRWMutex(s *uint32, lifo bool, skipframes int) // Semrelease atomically increments *s and notifies a waiting goroutine // if one is blocked in Semacquire.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 16 16:32:27 UTC 2022 - 2.3K bytes - Viewed (0) -
src/runtime/sema.go
func sync_runtime_SemacquireMutex(addr *uint32, lifo bool, skipframes int) { semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile, skipframes, waitReasonSyncMutexLock) } //go:linkname sync_runtime_SemacquireRWMutexR sync.runtime_SemacquireRWMutexR func sync_runtime_SemacquireRWMutexR(addr *uint32, lifo bool, skipframes int) { semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile, skipframes, waitReasonSyncRWMutexRLock)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 19K bytes - Viewed (0) -
src/runtime/lfstack_test.go
node = toMyNode(LFStackPop(stack)) if node == nil { t.Fatalf("stack is empty") } if node.data != 43 { t.Fatalf("no lifo") } // Pop another. node = toMyNode(LFStackPop(stack)) if node == nil { t.Fatalf("stack is empty") } if node.data != 42 { t.Fatalf("no lifo") } // Check the stack is empty again. if LFStackPop(stack) != nil { t.Fatalf("stack is not empty") }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Nov 17 23:12:04 UTC 2022 - 2.7K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/layout.go
posdegree := f.newSparseSet(f.NumBlocks()) // blocks with positive remaining degree defer f.retSparseSet(posdegree) // blocks with zero remaining degree. Use slice to simulate a LIFO queue to implement // the depth-first topology sorting algorithm. var zerodegree []ID // LIFO queue. Track the successor blocks of the scheduled block so that when we // encounter loops, we choose to schedule the successor block of the most recently // scheduled block.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Oct 31 21:41:20 UTC 2022 - 5K bytes - Viewed (0) -
tests/util/leak/check.go
// Check adds a check to a test to ensure there are no leaked goroutines // To use, simply call leak.Check(t) at the start of a test; Do not call it in defer. // It is recommended to call this as the first step, as Cleanup is called in LIFO order; this ensures any // Cleanup's called in the test happen first. // Any existing goroutines before the test starts are filtered out. This ensures a single test failing doesn't
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Wed Dec 20 10:22:38 UTC 2023 - 6.9K bytes - Viewed (0) -
android/guava/src/com/google/common/io/Closer.java
* additional exceptions that are thrown after that will be suppressed. */ @Override public void close() throws IOException { Throwable throwable = thrown; // close closeables in LIFO order while (!stack.isEmpty()) { Closeable closeable = stack.removeFirst(); try { closeable.close(); } catch (Throwable e) { if (throwable == null) {
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Tue May 07 15:26:58 UTC 2024 - 10.4K bytes - Viewed (0) -
guava/src/com/google/common/io/Closer.java
* additional exceptions that are thrown after that will be suppressed. */ @Override public void close() throws IOException { Throwable throwable = thrown; // close closeables in LIFO order while (!stack.isEmpty()) { Closeable closeable = stack.removeFirst(); try { closeable.close(); } catch (Throwable e) { if (throwable == null) {
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Tue May 07 15:26:58 UTC 2024 - 10.4K bytes - Viewed (0) -
src/runtime/mgcstack.go
// This applies only to the innermost frame at an async safe-point. conservative bool // buf contains the set of possible pointers to stack objects. // Organized as a LIFO linked list of buffers. // All buffers except possibly the head buffer are full. buf *stackWorkBuf freeBuf *stackWorkBuf // keep around one free buffer for allocation hysteresis
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 21 21:06:52 UTC 2023 - 10.6K bytes - Viewed (0) -
guava/src/com/google/common/util/concurrent/CycleDetectingLockFactory.java
ArrayList<LockGraphNode> acquiredLockList = requireNonNull(acquiredLocks.get()); LockGraphNode node = lock.getLockGraphNode(); // Iterate in reverse because locks are usually locked/unlocked in a // LIFO order. for (int i = acquiredLockList.size() - 1; i >= 0; i--) { if (acquiredLockList.get(i) == node) { acquiredLockList.remove(i); break; } } } }
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Fri Dec 15 19:31:54 UTC 2023 - 35.9K bytes - Viewed (0) -
android/guava/src/com/google/common/util/concurrent/CycleDetectingLockFactory.java
ArrayList<LockGraphNode> acquiredLockList = requireNonNull(acquiredLocks.get()); LockGraphNode node = lock.getLockGraphNode(); // Iterate in reverse because locks are usually locked/unlocked in a // LIFO order. for (int i = acquiredLockList.size() - 1; i >= 0; i--) { if (acquiredLockList.get(i) == node) { acquiredLockList.remove(i); break; } } } }
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Fri Dec 15 19:31:54 UTC 2023 - 35.9K bytes - Viewed (0)