Results 1 - 10 of 246 for acquirem (0.28 sec)

  1. src/runtime/pinner.go

    // It's safe to call Pin on non-Go pointers, in which case Pin will do nothing.
    func (p *Pinner) Pin(pointer any) {
    	if p.pinner == nil {
    		// Check the pinner cache first.
    		mp := acquirem()
    		if pp := mp.p.ptr(); pp != nil {
    			p.pinner = pp.pinnerCache
    			pp.pinnerCache = nil
    		}
    		releasem(mp)
    
    		if p.pinner == nil {
    			// Didn't get anything from the pinner cache.
    			p.pinner = new(pinner)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Apr 04 14:29:45 UTC 2024
    - 11K bytes
    - Viewed (0)
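
    For reference, the excerpt shows Pin consulting the per-P pinnerCache while
    preemption is disabled via acquirem/releasem. A minimal use of the public
    runtime.Pinner API (not part of this file) that reaches this path could look
    like the sketch below.

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	var p runtime.Pinner
    	b := new(byte)

    	// The first Pin lazily sets up the pinner; inside the runtime this is
    	// the path shown above, where acquirem() keeps the current P stable
    	// while its pinnerCache is checked.
    	p.Pin(b)
    	fmt.Printf("pinned %p\n", b)

    	// Unpin releases every pin made through p in one call.
    	p.Unpin()
    }
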
  2. src/runtime/lockrank_on.go

    	}
    }
    
    // acquireLockRankAndM acquires a rank which is not associated with a mutex
    // lock. To maintain the invariant that an M with m.locks==0 does not hold any
    // lock-like resources, it also acquires the M.
    //
    // This function may be called in nosplit context and thus must be nosplit.
    //
    //go:nosplit
    func acquireLockRankAndM(rank lockRank) {
    	acquirem()
    
    	gp := getg()
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Apr 22 14:29:04 UTC 2024
    - 10.6K bytes
    - Viewed (0)
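
    The comment explains the invariant that an M with m.locks==0 holds no
    lock-like resources, which is why acquiring a standalone rank also acquires
    the M. A standalone analogy of pairing acquisition with a holder count; the
    holder type and rank name below are made up and this is not runtime code.

    package main

    import "fmt"

    // holder mirrors the m.locks idea: any lock-like acquisition bumps the
    // count, so locks == 0 reliably means "holds nothing".
    type holder struct{ locks int }

    func (h *holder) acquireRank(rank string) {
    	h.locks++ // analogue of the acquirem() call in acquireLockRankAndM
    	fmt.Printf("acquired %s, locks=%d\n", rank, h.locks)
    }

    func (h *holder) releaseRank(rank string) {
    	if h.locks == 0 {
    		panic("releasing a rank while holding no lock-like resources")
    	}
    	h.locks--
    	fmt.Printf("released %s, locks=%d\n", rank, h.locks)
    }

    func main() {
    	var h holder
    	h.acquireRank("exampleRank")
    	h.releaseRank("exampleRank")
    }
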
  3. src/cmd/compile/internal/test/inl_test.go

    	// be inlinable. If they have no callers in their packages, they
    	// might not actually be inlined anywhere.
    	want := map[string][]string{
    		"runtime": {
    			"add",
    			"acquirem",
    			"add1",
    			"addb",
    			"adjustpanics",
    			"adjustpointer",
    			"alignDown",
    			"alignUp",
    			"bucketMask",
    			"bucketShift",
    			"chanbuf",
    			"evacuated",
    			"fastlog2",
    			"float64bits",
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Apr 09 04:07:57 UTC 2024
    - 10.7K bytes
    - Viewed (0)
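
    The test asserts that runtime helpers such as acquirem and add1 remain
    inlinable. Outside the runtime, the compiler's inlining decisions can be
    inspected with -gcflags=-m; a small illustrative sketch:

    package main

    import "fmt"

    // add1 is a typical leaf-function inlining candidate, like the runtime
    // helpers listed in the test above.
    func add1(x int) int { return x + 1 }

    func main() {
    	// Build with `go build -gcflags=-m .` to see decisions such as
    	// "can inline add1" and "inlining call to add1".
    	fmt.Println(add1(41))
    }
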
  4. src/runtime/trace.go

    	// events to indicate whether a P exists, rather than just making its
    	// existence implicit.
    	mp = acquirem()
    	for _, pp := range allp[len(allp):cap(allp)] {
    		pp.trace.readyNextGen(traceNextGen(gen))
    	}
    	releasem(mp)
    
    	if stopTrace {
    		// Acquire the shutdown sema to begin the shutdown process.
    		semacquire(&traceShutdownSema)
    
    		// Finish off CPU profile reading.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 21:17:41 UTC 2024
    - 37.1K bytes
    - Viewed (0)
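
    The excerpt briefly acquires the M while preparing per-P trace state and
    takes traceShutdownSema when the trace is stopping. From user code this
    machinery is driven through the runtime/trace package; a minimal sketch:

    package main

    import (
    	"log"
    	"os"
    	"runtime/trace"
    )

    func main() {
    	f, err := os.Create("trace.out")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()

    	// Start and Stop are the public entry points into runtime trace
    	// management; stopping a trace is what eventually reaches code like
    	// the stopTrace branch shown above.
    	if err := trace.Start(f); err != nil {
    		log.Fatal(err)
    	}
    	// ... traced workload would go here ...
    	trace.Stop()
    }
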
  5. src/runtime/panic.go

    //go:nosplit
    func canpanic() bool {
    	gp := getg()
    	mp := acquirem()
    
    	// Is it okay for gp to panic instead of crashing the program?
    	// Yes, as long as it is running Go code, not runtime code,
    	// and not stuck in a system call.
    	if gp != mp.curg {
    		releasem(mp)
    		return false
    	}
    	// N.B. mp.locks != 1 instead of 0 to account for acquirem.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 43.8K bytes
    - Viewed (0)
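
    canpanic reports whether a fault in the current goroutine may be converted
    into a recoverable panic rather than crashing the process: the goroutine
    must be running ordinary Go code on its M (gp == mp.curg), not runtime or
    signal code. The user-visible consequence is that a fault such as a nil
    dereference in regular code can be recovered:

    package main

    import "fmt"

    func main() {
    	defer func() {
    		if r := recover(); r != nil {
    			fmt.Println("recovered:", r)
    		}
    	}()

    	// A nil dereference in ordinary Go code passes the canpanic-style
    	// checks, so it becomes a panic that recover can catch instead of
    	// killing the program.
    	var p *int
    	_ = *p
    }
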
  6. src/runtime/mgcwork.go

    // the garbage collector from transitioning to mark termination since
    // gcWork may locally hold GC work buffers. This can be done by
    // disabling preemption (systemstack or acquirem).
    type gcWork struct {
    	// wbuf1 and wbuf2 are the primary and secondary work buffers.
    	//
    	// This can be thought of as a stack of both work buffers'
    	// pointers concatenated. When we pop the last pointer, we
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 12.9K bytes
    - Viewed (0)
  7. src/runtime/malloc_test.go

    	// and again to make sure we finish the sweep phase.
    	runtime.GC()
    	runtime.GC()
    
    	// Disable preemption so we stay on one P's tiny allocator and
    	// nothing else allocates from it.
    	runtime.Acquirem()
    
    	// Make 1-byte allocations until we get a fresh tiny slot.
    	aligned := false
    	for i := 0; i < 16; i++ {
    		x := runtime.Escape(new(byte))
    		if uintptr(unsafe.Pointer(x))&0xf == 0xf {
    			aligned = true
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Sep 05 23:35:29 UTC 2023
    - 10.6K bytes
    - Viewed (0)
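
    The test uses runtime.Acquirem, a test-only export of acquirem, to stay on
    a single P's tiny allocator and then checks allocation addresses against a
    16-byte mask. The same mask arithmetic in ordinary code, purely for
    illustration (user code cannot disable preemption this way):

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    func main() {
    	// The test above looks for a 1-byte allocation landing on the last
    	// byte of a 16-byte tiny block, i.e. address & 0xf == 0xf.
    	x := new(byte)
    	off := uintptr(unsafe.Pointer(x)) & 0xf
    	fmt.Printf("offset within 16-byte block: %#x (last byte: %v)\n", off, off == 0xf)
    }
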
  8. src/runtime/arena.go

    		if cap >= 0 {
    			return newarray(typ, cap)
    		}
    		return newobject(typ)
    	}
    
    	// Prevent preemption as we set up the space for a new object.
    	//
    	// Act like we're allocating.
    	mp := acquirem()
    	if mp.mallocing != 0 {
    		throw("malloc deadlock")
    	}
    	if mp.gsignal == getg() {
    		throw("malloc during signal")
    	}
    	mp.mallocing = 1
    
    	var ptr unsafe.Pointer
    	if !typ.Pointers() {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:44:56 UTC 2024
    - 37.9K bytes
    - Viewed (0)
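
    This allocation path backs the experimental arena package (built with
    GOEXPERIMENT=arenas); it disables preemption with acquirem and marks the M
    as mallocing, mirroring the regular allocator's checks. A hedged sketch of
    the public API:

    // Build with GOEXPERIMENT=arenas; the arena package is experimental and
    // may change or be removed.
    package main

    import (
    	"arena"
    	"fmt"
    )

    type point struct{ x, y int }

    func main() {
    	a := arena.NewArena()
    	defer a.Free()

    	// arena.New lands in runtime code like the excerpt above, which
    	// prevents preemption while carving out space for the new object.
    	p := arena.New[point](a)
    	p.x, p.y = 1, 2
    	fmt.Println(*p)
    }
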
  9. src/runtime/os_linux.go

    	//    modified state.
    	//
    	// We achieve these through different mechanisms:
    	//
    	// 1. Addition of new Ms to allm in allocm happens before clone of its
    	//    OS thread later in newm.
    	// 2. newm does acquirem to avoid being preempted, ensuring that new Ms
    	//    created in allocm will eventually reach OS thread clone later in
    	//    newm.
    	// 3. We take allocmLock for write here to prevent allocation of new Ms
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 25.9K bytes
    - Viewed (0)
  10. src/runtime/mgclimit.go

    func (l *gcCPULimiterState) startGCTransition(enableGC bool, now int64) {
    	if !l.tryLock() {
    		// This must happen during a STW, so we can't fail to acquire the lock.
    		// If we did, something went wrong. Throw.
    		throw("failed to acquire lock to start a GC transition")
    	}
    	if l.gcEnabled == enableGC {
    		throw("transitioning GC to the same state as before?")
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Apr 22 22:07:41 UTC 2024
    - 17.3K bytes
    - Viewed (0)
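
    startGCTransition runs during a stop-the-world transition, so its tryLock
    is expected to always succeed; failure indicates a bug and throws. The
    limiter itself is internal, but the last GC cycle in which it was enabled
    is published through runtime/metrics (metric name as documented for the GC
    CPU limiter):

    package main

    import (
    	"fmt"
    	"runtime/metrics"
    )

    func main() {
    	// The GC CPU limiter's state machine (gcCPULimiterState) is internal,
    	// but its most recent activation is exported as a runtime metric.
    	s := []metrics.Sample{{Name: "/gc/limiter/last-enabled:gc-cycle"}}
    	metrics.Read(s)
    	if s[0].Value.Kind() == metrics.KindUint64 {
    		fmt.Println("limiter last enabled in GC cycle:", s[0].Value.Uint64())
    	}
    }
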