- Sort Score
- Results per page: 10
- Languages All
Results 51 - 60 of about 10,000 for acquired (0.29 sec)
-
src/runtime/lockrank.go
return "BAD RANK" } return lockNames[rank] } // lockPartialOrder is the transitive closure of the lock rank graph. // An entry for rank X lists all of the ranks that can already be held // when rank X is acquired. // // Lock ranks that allow self-cycles list themselves. var lockPartialOrder [][]lockRank = [][]lockRank{ lockRankSysmon: {}, lockRankScavenge: {lockRankSysmon},
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:47:01 UTC 2024 - 19.9K bytes - Viewed (0) -
src/sync/rwmutex.go
// Wait for active readers. if r != 0 && rw.readerWait.Add(r) != 0 { runtime_SemacquireRWMutex(&rw.writerSem, false, 0) } if race.Enabled { race.Enable() race.Acquire(unsafe.Pointer(&rw.readerSem)) race.Acquire(unsafe.Pointer(&rw.writerSem)) } } // TryLock tries to lock rw for writing and reports whether it succeeded. // // Note that while correct uses of TryLock do exist, they are rare,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 21:14:51 UTC 2024 - 7.2K bytes - Viewed (0) -
platforms/core-runtime/base-services/src/test/groovy/org/gradle/internal/resources/DefaultResourceLockCoordinationServiceTest.groovy
coordinationService.notifyStateChange() } then: 1 * listener.execute(lock) 0 * listener._ } def "does not notify listener when lock is acquired and released in single action"() { def listener = Mock(Action) coordinationService.addLockReleaseListener(listener) def lock = resourceLock("lock1") when:
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu Apr 11 17:16:10 UTC 2024 - 15.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compile_on_demand_op.cc
// will retain references, but this is more obviously correct.) core::ScopedUnref xla_device_compiler_ref(xla_device_compiler); core::ScopedUnref profiler_ref(profiler); // Locks are acquired again when populating the `ctx` outputs. OP_REQUIRES_OK( ctx, Run(variable_args, result, xla_device_compiler, executable, ctx)); } }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 29 08:39:39 UTC 2024 - 13.4K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/telemetry/internal/upload/reports.go
return "", nil } // exclusiveWrite attempts to create filename exclusively, and if successful, // writes content to the resulting file handle. // // It returns a boolean indicating whether the exclusive handle was acquired, // and an error indicating whether the operation succeeded. // If the file already exists, exclusiveWrite returns (false, nil). func exclusiveWrite(filename string, content []byte) (_ bool, rerr error) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 04 14:52:56 UTC 2024 - 10.3K bytes - Viewed (0) -
okhttp/src/main/kotlin/okhttp3/internal/connection/RealRoutePlanner.kt
} else -> null } } // If the call's connection wasn't released, reuse it. We don't call connectionAcquired() here // because we already acquired it. if (connectionUser.candidateConnection() != null) { check(toClose == null) return ReusePlan(candidate) } // The call's connection was released. toClose?.closeQuietly()
Registered: Sun Jun 16 04:42:17 UTC 2024 - Last Modified: Sat Apr 20 17:03:43 UTC 2024 - 12K bytes - Viewed (0) -
platforms/core-execution/persistent-cache/src/main/java/org/gradle/cache/internal/DefaultCacheCoordinator.java
return new BTreePersistentIndexedCache<>(cacheFile, keySerializer, valueSerializer); } /** * Called just after the file lock has been acquired. */ private void afterLockAcquire(FileLock fileLock) { assert this.fileLock == null; this.fileLock = fileLock; this.stateAtOpen = fileLock.getState();
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Wed May 01 12:21:15 UTC 2024 - 20.5K bytes - Viewed (0) -
src/runtime/tracecpu.go
trace.cpuLogRead[0] = newProfBuf(3, profBufWordCount, profBufTagCount) trace.cpuLogRead[1] = newProfBuf(3, profBufWordCount, profBufTagCount) // We must not acquire trace.signalLock outside of a signal handler: a // profiling signal may arrive at any time and try to acquire it, leading to // deadlock. Because we can't use that lock to protect updates to // trace.cpuLogWrite (only use of the structure it references), reads and
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 15 17:03:35 UTC 2024 - 8.7K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/telemetry/internal/counter/counter.go
switch { case !state.locked() && state.havePtr(): if !c.state.update(&state, state.incReader()) { continue } // Counter unlocked or counter shared; has an initialized count pointer; acquired shared lock. if c.ptr.count == nil { for !c.state.update(&state, state.addExtra(uint64(n))) { // keep trying - we already took the reader lock state = c.state.load() }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 04 16:19:04 UTC 2024 - 10.3K bytes - Viewed (0) -
src/runtime/tracestack.go
gp = mp.curg } // Double-check that we own the stack we're about to trace. if debug.traceCheckStackOwnership != 0 && gp != nil { status := readgstatus(gp) // If the scan bit is set, assume we're the ones that acquired it. if status&_Gscan == 0 { // Use the trace status to check this. There are a number of cases // where a running goroutine might be in _Gwaiting, and these cases
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 21 14:38:56 UTC 2024 - 11K bytes - Viewed (0)