Results 11 - 20 of 75 for preemption (0.24 sec)

  1. src/runtime/cgocall.go

    	// saved by entersyscall here.
    	entersyscall()
    
    	// Tell asynchronous preemption that we're entering external
    	// code. We do this after entersyscall because this may block
    	// and cause an async preemption to fail, but at this point a
    	// sync preemption will succeed (though this is not a matter
    	// of correctness).
    	osPreemptExtEnter(mp)
    
    	mp.incgo = true
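
The comment above is doing two separate jobs: entersyscall releases the P so other Go code can run while the thread is in external code, and osPreemptExtEnter tells async preemption not to signal the thread while it executes C. A minimal cgo sketch of the first effect, with an invented C function blockInC and illustrative timings (a demonstration, not the runtime's own code):

    package main

    /*
    #include <unistd.h>
    static void blockInC(void) { usleep(200 * 1000); } // 200 ms of external code
    */
    import "C"

    import (
        "fmt"
        "runtime"
        "time"
    )

    func main() {
        runtime.GOMAXPROCS(1) // even with one P, a blocking cgo call must not stall Go code
        done := make(chan struct{})
        go func() {
            C.blockInC() // cgocall brackets this with entersyscall/exitsyscall
            close(done)
        }()
        start := time.Now()
        ticks := 0
        for {
            select {
            case <-done:
                fmt.Printf("Go side ran %d iterations during the C call (%v)\n",
                    ticks, time.Since(start))
                return
            default:
                ticks++ // keeps running because entersyscall gave up the P
            }
        }
    }
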
  2. src/sync/pool.go

    		}
    	}
    
    	// Mark the victim cache as empty so future gets don't bother
    	// with it.
    	atomic.StoreUintptr(&p.victimSize, 0)
    
    	return nil
    }
    
    // pin pins the current goroutine to P, disables preemption and
    	// returns the poolLocal pool for the P and the P's id.
    // Caller must call runtime_procUnpin() when done with the pool.
    func (p *Pool) pin() (*poolLocal, int) {
    	// Check whether p is nil to get a panic.
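
pin is the internal half of Get and Put: disabling preemption keeps the goroutine on its P, so the per-P poolLocal can be used without further locking. None of that is visible at the API surface; a typical use of sync.Pool looks like this (the buffer pool is just an illustrative example):

    package main

    import (
        "bytes"
        "fmt"
        "sync"
    )

    // New supplies a fresh value whenever the pool, including the victim
    // cache mentioned above, has nothing to reuse.
    var bufPool = sync.Pool{
        New: func() any { return new(bytes.Buffer) },
    }

    func main() {
        buf := bufPool.Get().(*bytes.Buffer) // Get calls pin() under the hood
        buf.Reset()
        buf.WriteString("hello")
        fmt.Println(buf.String())
        bufPool.Put(buf) // hand the buffer back for reuse
    }
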
  3. src/runtime/mgc.go

    		// Preemption must not occur here, or another G might see
    		// p.gcMarkWorkerMode.
    
    		// Disable preemption so we can use the gcw. If the
    		// scheduler wants to preempt us, we'll stop draining,
    		// dispose the gcw, and then preempt.
    		node.m.set(acquirem())
    		pp := gp.m.p.ptr() // P can't change with preemption disabled.
    
    		if gcBlackenEnabled == 0 {
  4. src/runtime/runtime2.go

    	vdsoPC uintptr // PC for traceback while in VDSO call
    
    	// preemptGen counts the number of completed preemption
    	// signals. This is used to detect when a preemption is
    	// requested, but fails.
    	preemptGen atomic.Uint32
    
    	// Whether there is a pending preemption signal on this M.
    	signalPending atomic.Uint32
    
    	// pcvalue lookup cache
    	pcvalueCache pcvalueCache
    
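
The generation counter described here supports a simple request/acknowledge protocol: the requester snapshots preemptGen, delivers the signal, and later checks whether the counter advanced; if it did not, the preemption failed and can be retried. A standalone sketch of that pattern (the worker type, handlePreempt, and the timings are invented; this is not the runtime's code):

    package main

    import (
        "fmt"
        "sync/atomic"
        "time"
    )

    // worker stands in for an M; preemptGen counts completed preemptions.
    type worker struct {
        preemptGen atomic.Uint32
    }

    // handlePreempt plays the role of the signal handler: it acts on the
    // request, then marks it completed by bumping the generation.
    func (w *worker) handlePreempt() {
        // ... stop at a safe point, capture state, etc. ...
        w.preemptGen.Add(1)
    }

    func main() {
        w := &worker{}
        before := w.preemptGen.Load() // snapshot before requesting
        go w.handlePreempt()          // "deliver" the preemption signal
        time.Sleep(10 * time.Millisecond)
        if w.preemptGen.Load() == before {
            fmt.Println("preemption requested but not completed; retry")
        } else {
            fmt.Println("preemption completed")
        }
    }
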
  5. src/runtime/proc.go

    	// already.
    	if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
    		return
    	}
    
    	// Disable preemption until ownership of pp transfers to the next M in
    	// startm. Otherwise preemption here would leave pp stuck waiting to
    	// enter _Pgcstop.
    	//
    	// See preemption comment on acquirem in startm for more details.
    	mp := acquirem()
    
    	var pp *p
    	lock(&sched.lock)
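
The first two lines of this snippet are a compare-and-swap gate: any number of callers may race to become the spinning M, but only the one whose CompareAndSwap succeeds continues, and the rest return early on the cheap Load. The same shape in a standalone program (tryWake and the counter are invented names):

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    var nmspinning atomic.Int32 // mirrors sched.nmspinning in the snippet

    // tryWake returns true for exactly one caller: the Load is a fast-path
    // reject, and the CompareAndSwap arbitrates between the rest.
    func tryWake() bool {
        if nmspinning.Load() != 0 || !nmspinning.CompareAndSwap(0, 1) {
            return false
        }
        return true // this caller would go on to start an M
    }

    func main() {
        var winners atomic.Int32
        var wg sync.WaitGroup
        for i := 0; i < 8; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                if tryWake() {
                    winners.Add(1)
                }
            }()
        }
        wg.Wait()
        fmt.Println("callers that proceeded:", winners.Load()) // always 1
    }
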
  6. src/runtime/lock_futex.go

    	}
    
    	gp := getg()
    	gp.m.mLockProfile.recordUnlock(l)
    	gp.m.locks--
    	if gp.m.locks < 0 {
    		throw("runtime·unlock: lock count")
    	}
    	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
    		gp.stackguard0 = stackPreempt
    	}
    }
    
    // One-time notifications.
    func noteclear(n *note) {
    	n.key = 0
    }
    
    func notewakeup(n *note) {
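
This tail of unlock is how a preemption request survives a critical section: while m.locks > 0 the runtime refuses the request and clears the stack-guard poison (that is what "cleared it in newstack" refers to), and the final unlock re-arms the poison so the preemption fires at the next function entry. A standalone mock of just that bookkeeping (the mMock type is invented; only the locks/preempt/stackguard0 interplay comes from the snippet):

    package main

    import "fmt"

    const stackPreempt = ^uintptr(0) // poison value checked at function entry

    // mMock mirrors the fields the snippet manipulates on an M.
    type mMock struct {
        locks      int
        preempt    bool    // a preemption has been requested
        stackguard uintptr // stands in for gp.stackguard0
    }

    func (mp *mMock) lock() { mp.locks++ }

    func (mp *mMock) unlock() {
        mp.locks--
        if mp.locks < 0 {
            panic("unlock: lock count")
        }
        // Restore the preemption request if it was cleared while the
        // lock was held.
        if mp.locks == 0 && mp.preempt {
            mp.stackguard = stackPreempt
        }
    }

    func main() {
        mp := &mMock{}
        mp.lock()
        mp.preempt = true // a request arrives inside the critical section
        mp.stackguard = 0 // ...and the poison is cleared, as newstack does
        mp.unlock()       // the last unlock re-arms the request
        fmt.Println(mp.stackguard == stackPreempt) // true
    }
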
  7. src/runtime/mgcmark.go

    	// most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
    	// scan preemption at ~100 µs.
    	//
    	// This must be > _MaxSmallSize so that the object base is the
    	// span base.
    	maxObletBytes = 128 << 10
    
    	// drainCheckThreshold specifies how many units of work to do
    	// between self-preemption checks in gcDrain. Assuming a scan
    	// rate of 1 MB/ms, this is ~100 µs. Lower values have higher
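
Working the numbers through: at the quoted 1–2 MB/ms scan rate, one 128 KB oblet takes 128/1024 ms to 128/2048 ms to scan, i.e. roughly 62–125 µs, which is where the "~100 µs" bound on scan preemption latency comes from.
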
  8. pkg/scheduler/schedule_one.go

    		// will fit due to the preemption. It is also possible that a different pod will schedule
    		// into the resources that were preempted, but this is harmless.
    
    		if !fwk.HasPostFilterPlugins() {
    			logger.V(3).Info("No PostFilter plugins are registered, so no preemption will be performed")
  9. src/runtime/lock_sema.go

    				break
    			}
    		}
    	}
    	gp.m.mLockProfile.recordUnlock(l)
    	gp.m.locks--
    	if gp.m.locks < 0 {
    		throw("runtime·unlock: lock count")
    	}
    	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
    		gp.stackguard0 = stackPreempt
    	}
    }
    
    // One-time notifications.
    func noteclear(n *note) {
    	n.key = 0
    }
    
    func notewakeup(n *note) {
  10. src/runtime/stack.go

    	// We are interested in preempting user Go code, not runtime code.
    	// If we're holding locks, mallocing, or preemption is disabled, don't
    	// preempt.
    	// This check is very early in newstack so that even the status change
    	// from Grunning to Gwaiting and back doesn't happen in this case.
    	// That status change by itself can be viewed as a small preemption,
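
The conditions the comment alludes to (holding locks, mallocing, preemption disabled) reduce to a simple conjunction checked before newstack does anything else. A standalone mock of that early test (the field names follow the runtime's m struct, but this is an illustration, not the real check):

    package main

    import "fmt"

    // mMock carries the fields the early newstack test looks at.
    type mMock struct {
        locks      int32
        mallocing  int32
        preemptoff string // non-empty means preemption is disabled, with a reason
    }

    // canPreempt reports whether it is safe to preempt user code right now.
    func canPreempt(mp mMock) bool {
        return mp.locks == 0 && mp.mallocing == 0 && mp.preemptoff == ""
    }

    func main() {
        fmt.Println(canPreempt(mMock{}))                 // true: at a safe point
        fmt.Println(canPreempt(mMock{locks: 1}))         // false: holding a runtime lock
        fmt.Println(canPreempt(mMock{preemptoff: "gc"})) // false: preemption disabled
    }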