Results 21 - 30 of 147 for preemption (0.45 sec)

  1. src/sync/pool.go

    		}
    	}
    
    	// Mark the victim cache as empty so future gets don't bother
    	// with it.
    	atomic.StoreUintptr(&p.victimSize, 0)
    
    	return nil
    }
    
    // pin pins the current goroutine to P, disables preemption and
    	// returns the poolLocal pool for the P and the P's id.
    // Caller must call runtime_procUnpin() when done with the pool.
    func (p *Pool) pin() (*poolLocal, int) {
    	// Check whether p is nil to get a panic.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Jun 07 21:14:51 UTC 2024
    - 9.4K bytes
    - Viewed (1)
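
    The victim-cache behavior this snippet implements is observable from
    user code. Below is a minimal runnable sketch; it pins GOMAXPROCS to 1
    so Put and Get hit the same P, and it relies on an implementation
    detail (pool contents survive exactly one GC cycle since Go 1.13), not
    an API guarantee.

    package main

    import (
    	"fmt"
    	"runtime"
    	"sync"
    )

    func main() {
    	runtime.GOMAXPROCS(1) // keep Put and Get on the same P for this demo

    	var pool sync.Pool
    	pool.Put("cached")

    	runtime.GC()            // primary cache is moved to the victim cache
    	fmt.Println(pool.Get()) // "cached": served from the victim cache

    	pool.Put("cached")
    	runtime.GC()            // moved to the victim cache again
    	runtime.GC()            // victim cache dropped; victimSize reset to 0
    	fmt.Println(pool.Get()) // <nil>: pool is empty and no New is set
    }
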
  2. src/runtime/mgc.go

    		// Preemption must not occur here, or another G might see
    		// p.gcMarkWorkerMode.
    
    		// Disable preemption so we can use the gcw. If the
    		// scheduler wants to preempt us, we'll stop draining,
    		// dispose the gcw, and then preempt.
    		node.m.set(acquirem())
    		pp := gp.m.p.ptr() // P can't change with preemption disabled.
    
    		if gcBlackenEnabled == 0 {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 16:25:21 UTC 2024
    - 62K bytes
    - Viewed (0)
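
    acquirem and releasem are runtime-internal and cannot be called from
    user code, so the following is a hypothetical, self-contained sketch of
    the discipline the comment describes: while the lock count is non-zero
    the scheduler will not preempt, so per-P state stays valid. The names
    m, acquirem, and releasem mirror the runtime but this is illustration
    only.

    package main

    import "fmt"

    // m is a hypothetical stand-in for the runtime's M structure.
    type m struct {
    	locks int // non-zero: preemption is disabled for this M
    }

    var curm = &m{}

    // acquirem disables preemption by bumping the lock count and returns
    // the current M, mirroring the pattern in the snippet above.
    func acquirem() *m {
    	curm.locks++
    	return curm
    }

    // releasem drops the count; at zero the M is preemptible again.
    func releasem(mp *m) {
    	mp.locks--
    }

    func main() {
    	mp := acquirem()
    	// Safe region: the P cannot change here, so per-P state such as
    	// the gcw buffer can be used without synchronization.
    	fmt.Println("locks:", mp.locks)
    	releasem(mp)
    }
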
  3. pkg/scheduler/extender.go

    	return h.ignorable
    }
    
    // SupportsPreemption returns true if an extender supports preemption.
    // An extender should have the preempt verb defined and its own node cache enabled.
    func (h *HTTPExtender) SupportsPreemption() bool {
    	return len(h.preemptVerb) > 0
    }
    
    // ProcessPreemption returns filtered candidate nodes and victims after running preemption logic in extender.
    func (h *HTTPExtender) ProcessPreemption(
    	pod *v1.Pod,
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Mon Feb 26 19:07:19 UTC 2024
    - 13.4K bytes
    - Viewed (0)
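
    A stripped-down sketch of the contract above, with a hypothetical
    extender type: preemption support is advertised solely by whether a
    preempt verb is configured, and a caller checks SupportsPreemption
    before delegating.

    package main

    import "fmt"

    // extender is a hypothetical, minimal analogue of HTTPExtender.
    type extender struct {
    	preemptVerb string // e.g. "preempt"; empty means unsupported
    }

    func (h *extender) SupportsPreemption() bool {
    	return len(h.preemptVerb) > 0
    }

    func main() {
    	for _, e := range []*extender{{}, {preemptVerb: "preempt"}} {
    		if e.SupportsPreemption() {
    			fmt.Println("delegate preemption to extender")
    		} else {
    			fmt.Println("skip extender: no preempt verb configured")
    		}
    	}
    }
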
  4. pkg/scheduler/framework/plugins/podtopologyspread/filtering.go

    }
    
    // CAVEAT: the reason that `[2]criticalPath` can work is based on the implementation of the current
    // preemption algorithm, in particular the following 2 facts:
    // Fact 1: we only preempt pods on the same node, instead of pods on multiple nodes.
    // Fact 2: each node is evaluated on a separate copy of the preFilterState during its preemption cycle.
    // If we plan to turn to a more complex algorithm like "arbitrary pods on multiple nodes", this
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed Feb 28 10:42:29 UTC 2024
    - 12.4K bytes
    - Viewed (1)
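
    The `[2]criticalPath` trick amounts to tracking only the two topology
    values with the lowest matching-pod counts, slot 0 being the minimum.
    A hedged sketch of the update rule (field names are simplified from
    the Kubernetes source):

    package main

    import (
    	"fmt"
    	"math"
    )

    // criticalPath pairs one topology value with its matching-pod count.
    type criticalPath struct {
    	topologyValue string
    	matchNum      int32
    }

    // criticalPaths keeps the two lowest counts, minimum first.
    type criticalPaths [2]criticalPath

    func (p *criticalPaths) update(tpVal string, num int32) {
    	// If tpVal is already tracked, refresh it and restore the order.
    	for i := range p {
    		if p[i].topologyValue == tpVal {
    			p[i].matchNum = num
    			if p[0].matchNum > p[1].matchNum {
    				p[0], p[1] = p[1], p[0]
    			}
    			return
    		}
    	}
    	// Otherwise admit tpVal only if it beats one of the current two.
    	if num < p[0].matchNum {
    		p[1], p[0] = p[0], criticalPath{tpVal, num}
    	} else if num < p[1].matchNum {
    		p[1] = criticalPath{tpVal, num}
    	}
    }

    func main() {
    	paths := criticalPaths{{"", math.MaxInt32}, {"", math.MaxInt32}}
    	paths.update("zone-a", 3)
    	paths.update("zone-b", 1)
    	paths.update("zone-c", 2)
    	fmt.Println(paths) // [{zone-b 1} {zone-c 2}]
    }
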
  5. src/cmd/internal/obj/plist.go

    // p0 is the start of the instruction stream.
    // isUnsafePoint(p) returns true if p is not safe for async preemption.
    // isRestartable(p) returns true if we can restart at the start of p (this Prog)
    // upon async preemption. (Currently multi-Prog restartable sequences are not
    // supported.)
    // isRestartable can be nil. In this case it is treated as always returning false.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Sep 01 15:52:41 UTC 2023
    - 11.5K bytes
    - Viewed (0)
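
    The two callbacks form a small contract: isUnsafePoint vetoes async
    preemption at an instruction, isRestartable permits preempt-and-restart,
    and a nil isRestartable means "never restartable". A hypothetical
    sketch (prog stands in for cmd/internal/obj.Prog; walkSafePoints is an
    invented name):

    package main

    import "fmt"

    // prog is a hypothetical stand-in for one instruction in a linked
    // instruction stream.
    type prog struct {
    	name string
    	link *prog
    }

    // walkSafePoints mirrors the documented contract: a nil isRestartable
    // is treated as always returning false.
    func walkSafePoints(p0 *prog, isUnsafePoint, isRestartable func(*prog) bool) {
    	if isRestartable == nil {
    		isRestartable = func(*prog) bool { return false }
    	}
    	for p := p0; p != nil; p = p.link {
    		fmt.Printf("%-5s unsafe=%-5v restartable=%v\n",
    			p.name, isUnsafePoint(p), isRestartable(p))
    	}
    }

    func main() {
    	add := &prog{name: "ADD"}
    	movw := &prog{name: "MOVW", link: add}
    	// Mark MOVW as unsafe for async preemption; nothing is restartable.
    	walkSafePoints(movw, func(p *prog) bool { return p.name == "MOVW" }, nil)
    }
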
  6. src/runtime/runtime2.go

    	vdsoPC uintptr // PC for traceback while in VDSO call
    
    	// preemptGen counts the number of completed preemption
    	// signals. This is used to detect when a preemption is
    	// requested, but fails.
    	preemptGen atomic.Uint32
    
    	// Whether there is a pending preemption signal on this M.
    	signalPending atomic.Uint32
    
    	// pcvalue lookup cache
    	pcvalueCache pcvalueCache
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 47.9K bytes
    - Viewed (0)
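
    The generation counter enables a simple request/acknowledge check:
    record preemptGen before sending the signal and compare afterwards; if
    it has not advanced, the preemption did not complete. A freestanding
    sketch of that idea (completePreemption is a hypothetical stand-in for
    the signal handler's final step):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    var preemptGen atomic.Uint32 // counts completed preemption signals

    // completePreemption models the handler's last act: bump the
    // generation so the requester can see the preemption finished.
    func completePreemption() {
    	preemptGen.Add(1)
    }

    func main() {
    	before := preemptGen.Load()
    	completePreemption() // in the runtime this happens asynchronously
    	if preemptGen.Load() == before {
    		fmt.Println("preemption was requested but failed; retry")
    	} else {
    		fmt.Println("preemption completed")
    	}
    }
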
  7. src/runtime/proc.go

    	// already.
    	if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
    		return
    	}
    
    	// Disable preemption until ownership of pp transfers to the next M in
    	// startm. Otherwise preemption here would leave pp stuck waiting to
    	// enter _Pgcstop.
    	//
    	// See preemption comment on acquirem in startm for more details.
    	mp := acquirem()
    
    	var pp *p
    	lock(&sched.lock)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 207.5K bytes
    - Viewed (0)
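
    The Load-then-CompareAndSwap guard in this snippet is a reusable
    pattern: the cheap Load skips the expensive CAS when someone else
    already holds the slot, and the CAS makes the claim race-free. A
    minimal sketch using the same shape (tryStartSpinning is an invented
    name):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    var nmspinning atomic.Int32 // 1 while some worker is already spinning

    // tryStartSpinning returns false if another worker already claimed
    // the spinning slot, mirroring the early return in the snippet above.
    func tryStartSpinning() bool {
    	if nmspinning.Load() != 0 || !nmspinning.CompareAndSwap(0, 1) {
    		return false
    	}
    	return true
    }

    func main() {
    	fmt.Println(tryStartSpinning()) // true: this caller claimed the slot
    	fmt.Println(tryStartSpinning()) // false: slot already taken
    }
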
  8. src/runtime/lock_futex.go

    	}
    
    	gp := getg()
    	gp.m.mLockProfile.recordUnlock(l)
    	gp.m.locks--
    	if gp.m.locks < 0 {
    		throw("runtime·unlock: lock count")
    	}
    	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
    		gp.stackguard0 = stackPreempt
    	}
    }
    
    // One-time notifications.
    func noteclear(n *note) {
    	n.key = 0
    }
    
    func notewakeup(n *note) {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:34 UTC 2024
    - 5.4K bytes
    - Viewed (0)
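
    The bookkeeping here defers preemption while any runtime lock is held
    and re-arms the request on the final unlock by poisoning the stack
    guard. A hypothetical, self-contained sketch (g and stackPreempt mimic
    the runtime's, but this is illustration only):

    package main

    import "fmt"

    // stackPreempt mirrors the runtime's sentinel (^uintptr(1313), i.e.
    // -1314 as a uintptr): a stack guard no stack pointer can satisfy,
    // forcing the next stack check into the preemption path.
    const stackPreempt = ^uintptr(1313)

    // g is a hypothetical stand-in for the runtime's goroutine struct.
    type g struct {
    	locks       int     // runtime locks held by this goroutine's M
    	preempt     bool    // a preemption was requested
    	stackguard0 uintptr // poisoned with stackPreempt to deliver it
    }

    func unlock(gp *g) {
    	gp.locks--
    	if gp.locks < 0 {
    		panic("unlock: lock count")
    	}
    	if gp.locks == 0 && gp.preempt {
    		// restore the preemption request held off while locked
    		gp.stackguard0 = stackPreempt
    	}
    }

    func main() {
    	gp := &g{locks: 1, preempt: true}
    	unlock(gp)
    	fmt.Printf("stackguard0 = %#x\n", gp.stackguard0)
    }
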
  9. src/runtime/mgcwork.go

    //
    //	(preemption must be disabled)
    //	gcw := &getg().m.p.ptr().gcw
    //	.. call gcw.put() to produce and gcw.tryGet() to consume ..
    //
    // It's important that any use of gcWork during the mark phase prevent
    // the garbage collector from transitioning to mark termination since
    // gcWork may locally hold GC work buffers. This can be done by
    // disabling preemption (systemstack or acquirem).
    type gcWork struct {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 12.9K bytes
    - Viewed (0)
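
    The put/tryGet pattern in the comment is safe only because the buffer
    is owned by a single P while preemption is off. A user-space analogue
    of that discipline (workBuf is hypothetical; the real gcWork manages
    GC-internal work buffers):

    package main

    import "fmt"

    // workBuf is a hypothetical, lock-free-by-ownership work buffer: it
    // is only correct while exactly one worker can touch it, which is
    // what disabling preemption guarantees for the per-P gcw.
    type workBuf struct {
    	buf []uintptr
    }

    // put queues one unit of work (for the GC: a pointer to scan).
    func (w *workBuf) put(p uintptr) { w.buf = append(w.buf, p) }

    // tryGet pops one unit of work, reporting false when drained.
    func (w *workBuf) tryGet() (uintptr, bool) {
    	if len(w.buf) == 0 {
    		return 0, false
    	}
    	p := w.buf[len(w.buf)-1]
    	w.buf = w.buf[:len(w.buf)-1]
    	return p, true
    }

    func main() {
    	var gcw workBuf
    	gcw.put(0x1000) // produce
    	gcw.put(0x2000)
    	for {
    		p, ok := gcw.tryGet() // consume until drained
    		if !ok {
    			break
    		}
    		fmt.Printf("scan %#x\n", p)
    	}
    }
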
  10. src/runtime/mgcmark.go

    	// most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
    	// scan preemption at ~100 µs.
    	//
    	// This must be > _MaxSmallSize so that the object base is the
    	// span base.
    	maxObletBytes = 128 << 10
    
    	// drainCheckThreshold specifies how many units of work to do
    	// between self-preemption checks in gcDrain. Assuming a scan
    	// rate of 1 MB/ms, this is ~100 µs. Lower values have higher
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Apr 18 21:25:11 UTC 2024
    - 52.5K bytes
    - Viewed (0)
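
    The latency bound in the comment is plain arithmetic: an oblet of at
    most 128 KB at a scan rate of 1-2 MB/ms takes 62.5-125 µs, hence
    "bounds scan preemption at ~100 µs". A few lines reproduce it:

    package main

    import "fmt"

    func main() {
    	const maxObletBytes = 128 << 10 // 128 KB, as in the snippet above
    	for _, mbPerMs := range []float64{1, 2} {
    		bytesPerUs := mbPerMs * (1 << 20) / 1000
    		fmt.Printf("at %.0f MB/ms: oblet scan <= %.1f us\n",
    			mbPerMs, maxObletBytes/bytesPerUs)
    	}
    }
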