- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 3,036 for releasem (0.16 sec)
-
src/runtime/debuglog_on.go
l = mp.dlogCache mp.dlogCache = nil } releasem(mp) return l } // putCachedDlogger attempts to return l to the local cache. It // returns false if this fails. func putCachedDlogger(l *dlogger) bool { mp := acquirem() if getg() != mp.gsignal && mp.dlogCache == nil { mp.dlogCache = l releasem(mp) return true } releasem(mp) return false
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 28 18:17:57 UTC 2021 - 1.1K bytes - Viewed (0) -
src/runtime/lock_js.go
notes[n] = gp notesWithTimeout[n] = noteWithTimeout{gp: gp, deadline: deadline} releasem(mp) gopark(nil, nil, waitReasonSleep, traceBlockSleep, 1) clearTimeoutEvent(id) // note might have woken early, clear timeout mp = acquirem() delete(notes, n) delete(notesWithTimeout, n) releasem(mp) return n.key == note_woken } for n.key != note_woken { mp := acquirem()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 21 21:02:20 UTC 2023 - 7.3K bytes - Viewed (0) -
src/runtime/lockrank_off.go
acquirem() } func unlockWithRank(l *mutex) { unlock2(l) } // This function may be called in nosplit context and thus must be nosplit. // //go:nosplit func releaseLockRankAndM(rank lockRank) { releasem(getg().m) } func lockWithRankMayAcquire(l *mutex, rank lockRank) { } //go:nosplit func assertLockHeld(l *mutex) { } //go:nosplit func assertRankHeld(r lockRank) { } //go:nosplit
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 22 14:29:04 UTC 2024 - 1.2K bytes - Viewed (0) -
src/runtime/pinner.go
func (p *Pinner) Pin(pointer any) { if p.pinner == nil { // Check the pinner cache first. mp := acquirem() if pp := mp.p.ptr(); pp != nil { p.pinner = pp.pinnerCache pp.pinnerCache = nil } releasem(mp) if p.pinner == nil { // Didn't get anything from the pinner cache. p.pinner = new(pinner) p.refs = p.refStore[:0] // We set this finalizer once and never clear it. Thus, if the
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 04 14:29:45 UTC 2024 - 11K bytes - Viewed (0) -
src/runtime/mgc.go
work.tMark = now // Release the CPU limiter. gcCPULimiter.finishGCTransition(now) }) // Release the world sema before Gosched() in STW mode // because we will need to reacquire it later but before // this goroutine becomes runnable again, and we could // self-deadlock otherwise. semrelease(&worldsema) releasem(mp) // Make sure we block instead of returning to user code
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 62K bytes - Viewed (0) -
src/runtime/mheap.go
p := handle.Load() if p == 0 { releasem(mp) return nil } // Be careful. p may or may not refer to valid memory anymore, as it could've been // swept and released already. It's always safe to ensure a span is swept, though, // even if it's just some random span. span := spanOfHeap(p) if span == nil { // The span probably got swept and released. releasem(mp) return nil }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0) -
src/runtime/malloc_test.go
aligned := false for i := 0; i < 16; i++ { x := runtime.Escape(new(byte)) if uintptr(unsafe.Pointer(x))&0xf == 0xf { aligned = true break } } if !aligned { runtime.Releasem() t.Fatal("unable to get a fresh tiny slot") } // Create a 4-byte object so that the current // tiny slot is partially filled. runtime.Escape(new(uint32))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Sep 05 23:35:29 UTC 2023 - 10.6K bytes - Viewed (0) -
src/runtime/panic.go
if gp != mp.curg { releasem(mp) return false } // N.B. mp.locks != 1 instead of 0 to account for acquirem. if mp.locks != 1 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 { releasem(mp) return false } status := readgstatus(gp) if status&^_Gscan != _Grunning || gp.syscallsp != 0 { releasem(mp) return false }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 43.8K bytes - Viewed (0) -
src/runtime/proc.go
unlock(&newmHandoff.lock) // The M has not started yet, but the template thread does not // participate in STW, so it will always process queued Ms and // it is safe to releasem. releasem(getg().m) return } newm1(mp) releasem(getg().m) } func newm1(mp *m) { if iscgo { var ts cgothreadstart if _cgo_thread_start == nil { throw("_cgo_thread_start missing") }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 207.5K bytes - Viewed (0) -
src/runtime/lockrank_on.go
gp.m.locksHeldLen-- break } } if !found { println(gp.m.procid, ":", rank.String(), rank) throw("lockRank release without matching lockRank acquire") } }) releasem(getg().m) } // nosplit because it may be called from nosplit contexts. // //go:nosplit func lockWithRankMayAcquire(l *mutex, rank lockRank) { gp := getg()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 22 14:29:04 UTC 2024 - 10.6K bytes - Viewed (0)