Results 1 - 10 of 1,318 for releasem (0.41 sec)

  1. src/runtime/lockrank_off.go

    	acquirem()
    }
    
    func unlockWithRank(l *mutex) {
    	unlock2(l)
    }
    
    // This function may be called in nosplit context and thus must be nosplit.
    //
    //go:nosplit
    func releaseLockRankAndM(rank lockRank) {
    	releasem(getg().m)
    }
    
    func lockWithRankMayAcquire(l *mutex, rank lockRank) {
    }
    
    //go:nosplit
    func assertLockHeld(l *mutex) {
    }
    
    //go:nosplit
    func assertRankHeld(r lockRank) {
    }
    
    //go:nosplit
    - Last Modified: Mon Apr 22 14:29:04 UTC 2024
    - 1.2K bytes
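
Every hit in this listing follows the same runtime-internal discipline seen in the snippet above: acquirem pins the current goroutine to its M by incrementing m.locks, which disables preemption, and every acquirem must be balanced by a releasem on each exit path. A minimal sketch of that pairing (runtime-internal code, not callable from user packages; the earlyExit condition is made up purely for illustration):

    mp := acquirem() // returns the current M and increments mp.locks, disabling preemption
    if earlyExit {   // hypothetical condition, showing the early-return path
    	releasem(mp) // every exit path must release before returning
    	return
    }
    // ... work that must not be preempted or migrated to another M ...
    releasem(mp) // decrements mp.locks; preemption becomes possible again
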
  2. src/runtime/pinner.go

    func (p *Pinner) Pin(pointer any) {
    	if p.pinner == nil {
    		// Check the pinner cache first.
    		mp := acquirem()
    		if pp := mp.p.ptr(); pp != nil {
    			p.pinner = pp.pinnerCache
    			pp.pinnerCache = nil
    		}
    		releasem(mp)
    
    		if p.pinner == nil {
    			// Didn't get anything from the pinner cache.
    			p.pinner = new(pinner)
    			p.refs = p.refStore[:0]
    
    			// We set this finalizer once and never clear it. Thus, if the
    - Last Modified: Thu Apr 04 14:29:45 UTC 2024
    - 11K bytes
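
The pinner.go hit is the implementation behind the public runtime.Pinner API (Go 1.21+); the snippet shows Pin reusing a per-P pinner cache under the acquirem/releasem pairing. For context, this is roughly how the same machinery is reached from user code (a small illustrative example, not taken from the result above):

    package main

    import "runtime"

    func main() {
    	var p runtime.Pinner
    	x := new(int)
    	p.Pin(x) // the object *x stays put until Unpin is called
    	// ... pass the pointer to code (e.g. cgo) that needs a stable address ...
    	p.Unpin() // release all pins held by p
    }
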
  3. src/runtime/mgc.go

    		work.tMark = now
    
    		// Release the CPU limiter.
    		gcCPULimiter.finishGCTransition(now)
    	})
    
    	// Release the world sema before Gosched() in STW mode
    	// because we will need to reacquire it later but before
    	// this goroutine becomes runnable again, and we could
    	// self-deadlock otherwise.
    	semrelease(&worldsema)
    	releasem(mp)
    
    	// Make sure we block instead of returning to user code
    - Last Modified: Wed May 29 16:25:21 UTC 2024
    - 62K bytes
  4. src/runtime/mheap.go

    	p := handle.Load()
    	if p == 0 {
    		releasem(mp)
    		return nil
    	}
    	// Be careful. p may or may not refer to valid memory anymore, as it could've been
    	// swept and released already. It's always safe to ensure a span is swept, though,
    	// even if it's just some random span.
    	span := spanOfHeap(p)
    	if span == nil {
    		// The span probably got swept and released.
    		releasem(mp)
    		return nil
    	}
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
  5. src/runtime/panic.go

    	if gp != mp.curg {
    		releasem(mp)
    		return false
    	}
    	// N.B. mp.locks != 1 instead of 0 to account for acquirem.
    	if mp.locks != 1 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 {
    		releasem(mp)
    		return false
    	}
    	status := readgstatus(gp)
    	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
    		releasem(mp)
    		return false
    	}
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 43.8K bytes
  6. src/runtime/proc.go

    		unlock(&newmHandoff.lock)
    		// The M has not started yet, but the template thread does not
    		// participate in STW, so it will always process queued Ms and
    		// it is safe to releasem.
    		releasem(getg().m)
    		return
    	}
    	newm1(mp)
    	releasem(getg().m)
    }
    
    func newm1(mp *m) {
    	if iscgo {
    		var ts cgothreadstart
    		if _cgo_thread_start == nil {
    			throw("_cgo_thread_start missing")
    		}
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 207.5K bytes
  7. src/runtime/lockrank_on.go

    				gp.m.locksHeldLen--
    				break
    			}
    		}
    		if !found {
    			println(gp.m.procid, ":", rank.String(), rank)
    			throw("lockRank release without matching lockRank acquire")
    		}
    	})
    
    	releasem(getg().m)
    }
    
    // nosplit because it may be called from nosplit contexts.
    //
    //go:nosplit
    func lockWithRankMayAcquire(l *mutex, rank lockRank) {
    	gp := getg()
    - Last Modified: Mon Apr 22 14:29:04 UTC 2024
    - 10.6K bytes
  8. src/runtime/arena.go

    		}
    	} else {
    		v, ok := s.userArenaChunkFree.takeFromFront(size, typ.Align_)
    		if ok {
    			ptr = unsafe.Pointer(v)
    		}
    	}
    	if ptr == nil {
    		// Failed to allocate.
    		mp.mallocing = 0
    		releasem(mp)
    		return nil
    	}
    	if s.needzero != 0 {
    		throw("arena chunk needs zeroing, but should already be zeroed")
    	}
    	// Set up heap bitmap and do extra accounting.
    	if typ.Pointers() {
    		if cap >= 0 {
    - Last Modified: Wed May 08 17:44:56 UTC 2024
    - 37.9K bytes
  9. src/cmd/compile/internal/test/inl_test.go

    - Last Modified: Tue Apr 09 04:07:57 UTC 2024
    - 10.7K bytes
  10. src/runtime/symtab.go

    						break
    					} else {
    						cache.inUse--
    						releasem(mp)
    						return val, pc
    					}
    				}
    			}
    		} else if debugCheckCache && (cache.inUse < 1 || cache.inUse > 2) {
    			// Catch accounting errors or deeply reentrant use. In principle
    			// "inUse" should never exceed 2.
    			throw("cache.inUse out of range")
    		}
    		cache.inUse--
    		releasem(mp)
    	}
    
    	if !f.valid() {
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 40K bytes