Results 31 - 40 of 283 for curg (0.04 sec)

  1. src/runtime/traceevent.go

    	w := tl.writer()
    	if pp := tl.mp.p.ptr(); pp != nil && !pp.trace.statusWasTraced(tl.gen) && pp.trace.acquireStatus(tl.gen) {
    		w = w.writeProcStatus(uint64(pp.id), procStatus, pp.trace.inSweep)
    	}
    	if gp := tl.mp.curg; gp != nil && !gp.trace.statusWasTraced(tl.gen) && gp.trace.acquireStatus(tl.gen) {
    		w = w.writeGoStatus(uint64(gp.goid), int64(tl.mp.procid), goStatus, gp.inMarkAssist, 0 /* no stack */)
    	}
    	return traceEventWriter{w}
    }
    - Last Modified: Wed May 08 17:47:01 UTC 2024
    - 9.2K bytes
  2. src/runtime/asm_wasm.s

    // func switchToCrashStack0(fn func())
    TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-8
    	MOVD fn+0(FP), CTXT	// context register
    	MOVD	g_m(g), R2	// curm
    
    	// set g to gcrash
    	MOVD	$runtime·gcrash(SB), g	// g = &gcrash
    	MOVD	R2, g_m(g)	// g.m = curm
    	MOVD	g, m_g0(R2)	// curm.g0 = g
    
    	// switch to crashstack
    	I64Load (g_stack+stack_hi)(g)
    	I64Const $(-4*8)
    	I64Add
    	I32WrapI64
    	Set SP
    
    - Last Modified: Mon Nov 20 21:26:51 UTC 2023
    - 11.8K bytes
  3. src/runtime/mbitmap.go

    		// for that memory to get freed.
    		KeepAlive(ep)
    		return
    	}
    
    	// stack
    	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
    		found := false
    		var u unwinder
    		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
    			if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
    				found = true
    				break
    			}
    		}
    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 60K bytes
  4. src/runtime/mgcwork.go

    	lock(&work.wbufSpans.lock)
    	if gcphase != _GCoff || work.wbufSpans.free.isEmpty() {
    		unlock(&work.wbufSpans.lock)
    		return false
    	}
    	systemstack(func() {
    		gp := getg().m.curg
    		for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ {
    			span := work.wbufSpans.free.first
    			if span == nil {
    				break
    			}
    			work.wbufSpans.free.remove(span)
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 12.9K bytes
  5. src/runtime/mgcmark.go

    			// If this is a self-scan, put the user G in
    			// _Gwaiting to prevent self-deadlock. It may
    			// already be in _Gwaiting if this is a mark
    			// worker or we're in mark termination.
    			userG := getg().m.curg
    			selfScan := gp == userG && readgstatus(userG) == _Grunning
    			if selfScan {
    				casGToWaitingForGC(userG, _Grunning, waitReasonGarbageCollectionScan)
    			}
    
    			// TODO: suspendG blocks (and spins) until gp
    - Last Modified: Thu Apr 18 21:25:11 UTC 2024
    - 52.5K bytes
  6. src/internal/trace/trace_test.go

    		// Examine the execution tracer's view of the CPU profile samples. Filter it
    		// to only include samples from the single test goroutine. Use the goroutine
    		// ID that was recorded in the events: that should reflect getg().m.curg,
    		// same as the profiler's labels (even when the M is using its g0 stack).
    		totalTraceSamples := 0
    		traceSamples := 0
    		traceStacks := make(map[string]int)
    		r, err := trace.NewReader(bytes.NewReader(tb))
    - Last Modified: Fri May 17 18:48:18 UTC 2024
    - 18.5K bytes
  7. src/runtime/race_ppc64le.s

    // See racecallback for command codes.
    TEXT	runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0
    	// Handle command raceGetProcCmd (0) here.
    	// First, code below assumes that we are on curg, while raceGetProcCmd
    	// can be executed on g0. Second, it is called frequently, so will
    	// benefit from this fast path.
    	MOVD	$0, R0		// clear R0 since we came from C code
    	CMP	R3, $0
    	BNE	rest
    - Last Modified: Fri May 17 18:37:29 UTC 2024
    - 17K bytes
  8. src/runtime/trace.go

    		if gp.trace.statusWasTraced(gen) {
    			return
    		}
    		// Scribble down information about this goroutine.
    		ug := untracedG{gp: gp, mid: -1}
    		systemstack(func() {
    			me := getg().m.curg
    			// We don't have to handle this G status transition because we
    			// already eliminated ourselves from consideration above.
    			casGToWaitingForGC(me, _Grunning, waitReasonTraceGoroutineStatus)
    - Last Modified: Wed May 22 21:17:41 UTC 2024
    - 37.1K bytes
  9. src/runtime/sema.go

    	semacquire1(addr, false, 0, 0, waitReasonSemacquire)
    }
    
    func semacquire1(addr *uint32, lifo bool, profile semaProfileFlags, skipframes int, reason waitReason) {
    	gp := getg()
    	if gp != gp.m.curg {
    		throw("semacquire not on the G stack")
    	}
    
    	// Easy case.
    	if cansemacquire(addr) {
    		return
    	}
    
    	// Harder case:
    	//	increment waiter count
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 19K bytes
  10. src/runtime/runtime2.go

    	sigmask       sigset            // storage for saved signal mask
    	tls           [tlsSlots]uintptr // thread-local storage (for x86 extern register)
    	mstartfn      func()
    	curg          *g       // current running goroutine
    	caughtsig     guintptr // goroutine running during fatal signal
    	p             puintptr // attached p for executing go code (nil if not executing go code)
    	nextp         puintptr
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 47.9K bytes
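Across these results the access pattern is consistent: each M records its current user goroutine in curg (see the m struct in runtime2.go above), and runtime code compares getg() against getg().m.curg to tell whether it is running on the user goroutine's stack or on the M's g0 system stack (as in the sema.go check). A minimal toy sketch of that relationship, using ordinary exported Go types rather than the real unexported runtime ones:

    package main

    import "fmt"

    // Toy stand-ins for the runtime's unexported g and m structs.
    type g struct {
    	goid int64
    	m    *m
    }

    type m struct {
    	g0   *g // goroutine with the scheduling (system) stack
    	curg *g // current running user goroutine
    }

    func main() {
    	mp := &m{}
    	mp.g0 = &g{goid: 0, m: mp}
    	mp.curg = &g{goid: 42, m: mp}

    	// Runtime code frequently asks: "am I on the user G or on g0?"
    	gp := mp.g0
    	fmt.Println("on user stack:", gp == gp.m.curg) // false: this is g0
    }

Code such as semacquire1 in result 9 throws when this comparison fails, because it must block on the user goroutine's stack rather than on g0.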