- Sort Score
- Results per page: 10
- Languages All
Results 1 - 8 of 8 for defBlock (0.17 sec)
-
src/runtime/mklockrank.go
// select. "hchan": true, // Multiple hchanLeafs are acquired in hchan.sortkey() order in // syncadjustsudogs(). "hchanLeaf": true, // The point of the deadlock lock is to deadlock. "deadlock": true, } func main() { flagO := flag.String("o", "", "write to `file` instead of stdout") flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go") flag.Parse()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:47:01 UTC 2024 - 9.1K bytes - Viewed (0) -
platforms/core-runtime/logging/src/main/java/org/gradle/internal/logging/console/ProgressBar.java
Future<?> ignored = deadlockPreventer.submit(new Runnable() { @Override public void run() { // do not do this directly or a deadlock happens // to prevent that deadlock, execute it separately in another thread LOGGER.warn("More progress was logged than there should be ({} > {})", current, total); } }); }
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Tue May 28 13:09:37 UTC 2024 - 5.7K bytes - Viewed (0) -
src/sync/once.go
// arguments to a function to be invoked by Do: // // config.once.Do(func() { config.init(filename) }) // // Because no call to Do returns until the one call to f returns, if f causes // Do to be called, it will deadlock. // // If f panics, Do considers it to have returned; future calls of Do return // without calling f. func (o *Once) Do(f func()) { // Note: Here is an incorrect implementation of Do: //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 21:14:51 UTC 2024 - 2.5K bytes - Viewed (0) -
pkg/kube/krt/fetch.go
ff := func(o []Event[T], initialSync bool) { f(slices.Map(o, castEvent[T, any]), initialSync) } // Skip calling all the existing state for secondary dependencies, otherwise we end up with a deadlock due to // rerunning the same collection's recomputation at the same time (once for the initial event, then for the initial registration). c.RegisterBatch(ff, false) }) // Now we can do the real fetching
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Fri May 10 23:33:56 UTC 2024 - 2.6K bytes - Viewed (0) -
src/runtime/time_fake.go
func time_now() (sec int64, nsec int32, mono int64) { return faketime / 1e9, int32(faketime % 1e9), faketime } // write is like the Unix write system call. // We have to avoid write barriers to avoid potential deadlock // on write calls. // //go:nowritebarrierrec func write(fd uintptr, p unsafe.Pointer, n int32) int32 { if !(fd == 1 || fd == 2) { // Do an ordinary write. return write1(fd, p, n) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:15:13 UTC 2024 - 2.5K bytes - Viewed (0) -
src/runtime/rwmutex.go
const rwmutexMaxReaders = 1 << 30 // rlock locks rw for reading. func (rw *rwmutex) rlock() { // The reader must not be allowed to lose its P or else other // things blocking on the lock may consume all of the Ps and // deadlock (issue #20903). Alternatively, we could drop the P // while sleeping. acquireLockRankAndM(rw.readRank) lockWithRankMayAcquire(&rw.rLock, getLockRank(&rw.rLock)) if rw.readerCount.Add(1) < 0 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 22 14:29:04 UTC 2024 - 5K bytes - Viewed (0) -
src/runtime/coro.go
gnext = next.ptr() break } } // Check if we're switching to ourselves. This case is able to break our // thread-lock invariants and an unbuffered channel implementation of // coroswitch would deadlock. It's clear that this case should just not // work. if gnext == gp { throw("coroswitch of a goroutine to itself") } // Emit the trace event after getting gnext but before changing curg.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:09:18 UTC 2024 - 7.4K bytes - Viewed (0) -
src/runtime/tracecpu.go
trace.cpuLogRead[1] = newProfBuf(3, profBufWordCount, profBufTagCount) // We must not acquire trace.signalLock outside of a signal handler: a // profiling signal may arrive at any time and try to acquire it, leading to // deadlock. Because we can't use that lock to protect updates to // trace.cpuLogWrite (only use of the structure it references), reads and // writes of the pointer must be atomic. (And although this field is never
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 15 17:03:35 UTC 2024 - 8.7K bytes - Viewed (0)