Results 21 - 30 of 363 for getg (0.07 sec)

  1. src/runtime/tracestack.go

    		// symbolizer.
    		pcBuf[0] = logicalStackSentinel
    		if getg() == gp {
    			nstk += callers(skip+1, pcBuf[1:])
    		} else if gp != nil {
    			nstk += gcallers(gp, skip, pcBuf[1:])
    		}
    	} else {
    		// Fast path: Unwind using frame pointers.
    		pcBuf[0] = uintptr(skip)
    		if getg() == gp {
    			nstk += fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf[1:])
    		} else if gp != nil {
    			// Three cases:
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue May 21 14:38:56 UTC 2024
    - 11K bytes
    - Viewed (0)
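
    For illustration only: the slow path above fills pcBuf via the runtime-internal callers/gcallers helpers. Outside the runtime, the exported way to capture and symbolize the current goroutine's call stack is runtime.Callers plus runtime.CallersFrames. A minimal sketch (the buffer size and skip count are arbitrary choices here, not taken from the excerpt):

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	// Capture up to 32 caller PCs; skip=1 omits runtime.Callers itself.
    	pcBuf := make([]uintptr, 32)
    	n := runtime.Callers(1, pcBuf)

    	// Symbolize the captured PCs into function, file, and line information.
    	frames := runtime.CallersFrames(pcBuf[:n])
    	for {
    		frame, more := frames.Next()
    		fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
    		if !more {
    			break
    		}
    	}
    }
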
  2. src/runtime/signal_unix.go

    		// it may hit stack split that is not expected here.
    		if gp != nil {
    			setg(nil)
    		}
    		badsignal(uintptr(sig), c)
    		// Restore g
    		if gp != nil {
    			setg(gp)
    		}
    		return
    	}
    
    	setg(gp.m.gsignal)
    
    	// If some non-Go code called sigaltstack, adjust.
    	var gsignalStack gsignalStack
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 10 16:04:54 UTC 2024
    - 45K bytes
    - Viewed (0)
  3. src/runtime/panic.go

    	if goarch.IsWasm == 0 && stringslite.HasPrefix(funcname(findfunc(pc)), "runtime.") {
    		// Note: wasm can't tail call, so we can't get the original caller's pc.
    		throw(msg)
    	}
    	// TODO: is this redundant? How could we be in malloc
    	// but not in the runtime? runtime/internal/*, maybe?
    	gp := getg()
    	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
    		throw(msg)
    	}
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 43.8K bytes
    - Viewed (0)
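
    The check above resolves a pc to its function name with the runtime-internal findfunc/funcname pair and tests for the "runtime." prefix. The exported counterpart is runtime.FuncForPC; a minimal sketch, for illustration only:

    package main

    import (
    	"fmt"
    	"runtime"
    	"strings"
    )

    func main() {
    	// runtime.Caller(0) reports the pc of this call site.
    	pc, _, _, ok := runtime.Caller(0)
    	if !ok {
    		return
    	}
    	// Exported analogue of funcname(findfunc(pc)) in the excerpt.
    	name := runtime.FuncForPC(pc).Name()
    	fmt.Println(name, strings.HasPrefix(name, "runtime."))
    }
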
  4. src/runtime/os3_plan9.go

    	"internal/goarch"
    	"internal/stringslite"
    	"unsafe"
    )
    
    // May run during STW, so write barriers are not allowed.
    //
    //go:nowritebarrierrec
    func sighandler(_ureg *ureg, note *byte, gp *g) int {
    	gsignal := getg()
    	mp := gsignal.m
    
    	var t sigTabT
    	var docrash bool
    	var sig int
    	var flags int
    	var level int32
    
    	c := &sigctxt{_ureg}
    	notestr := gostringnocopy(note)
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 15:41:45 UTC 2024
    - 4K bytes
    - Viewed (0)
  5. src/runtime/msan.go

    // anyhow for values on the stack. Just ignore msanread when running
    // on the system stack. The other msan functions are fine.
    //
    //go:linkname msanread
    //go:nosplit
    func msanread(addr unsafe.Pointer, sz uintptr) {
    	gp := getg()
    	if gp == nil || gp.m == nil || gp == gp.m.g0 || gp == gp.m.gsignal {
    		return
    	}
    	domsanread(addr, sz)
    }
    
    //go:noescape
    func domsanread(addr unsafe.Pointer, sz uintptr)
    
    //go:linkname msanwrite
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Feb 20 20:50:21 UTC 2024
    - 1.6K bytes
    - Viewed (0)
  6. src/runtime/rwmutex.go

    			lock(&rw.rLock)
    			if rw.readerPass > 0 {
    				// Writer finished.
    				rw.readerPass -= 1
    				unlock(&rw.rLock)
    			} else {
    				// Queue this reader to be woken by
    				// the writer.
    				m := getg().m
    				m.schedlink = rw.readers
    				rw.readers.set(m)
    				unlock(&rw.rLock)
    				notesleep(&m.park)
    				noteclear(&m.park)
    			}
    		})
    	}
    }
    
    // runlock undoes a single rlock call on rw.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Apr 22 14:29:04 UTC 2024
    - 5K bytes
    - Viewed (0)
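
    Note that this is the runtime-internal rwmutex, which parks waiting readers directly on their M when a writer is active. For comparison only, the user-facing equivalent is sync.RWMutex; a minimal sketch (the counter and goroutine count are illustrative):

    package main

    import (
    	"fmt"
    	"sync"
    )

    func main() {
    	var mu sync.RWMutex
    	hits := 0

    	var wg sync.WaitGroup
    	for i := 0; i < 4; i++ {
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			// Readers block only while a writer holds the lock,
    			// mirroring the reader-parking path in the excerpt.
    			mu.RLock()
    			_ = hits
    			mu.RUnlock()
    		}()
    	}

    	mu.Lock()
    	hits++
    	mu.Unlock()

    	wg.Wait()
    	fmt.Println("hits:", hits)
    }
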
  7. src/runtime/stack.go

    	}
    	if s := readgstatus(gp); s&_Gscan == 0 {
    		// We don't own the stack via _Gscan. We could still
    		// own it if this is our own user G and we're on the
    		// system stack.
    		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
    			// We don't own the stack.
    			throw("bad status in shrinkstack")
    		}
    	}
    	if !isShrinkStackSafe(gp) {
    		throw("shrinkstack at bad time")
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 41.1K bytes
    - Viewed (0)
  8. src/runtime/mgcmark.go

    // This must be called with preemption enabled.
    func gcAssistAlloc(gp *g) {
    	// Don't assist in non-preemptible contexts. These are
    	// generally fragile and won't allow the assist to block.
    	if getg() == gp.m.g0 {
    		return
    	}
    	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
    		return
    	}
    
    	// This extremely verbose boolean indicates whether we've
    	// entered mark assist from the perspective of the tracer.
    	//
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Apr 18 21:25:11 UTC 2024
    - 52.5K bytes
    - Viewed (0)
  9. src/runtime/atomic_pointer.go

    // See go.dev/issue/67401.
    //
    //go:linkname atomicwb
    //go:nosplit
    func atomicwb(ptr *unsafe.Pointer, new unsafe.Pointer) {
    	slot := (*uintptr)(unsafe.Pointer(ptr))
    	buf := getg().m.p.ptr().wbBuf.get2()
    	buf[0] = *slot
    	buf[1] = uintptr(new)
    }
    
    // atomicstorep performs *ptr = new atomically and invokes a write barrier.
    //
    //go:nosplit
    func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 16:25:21 UTC 2024
    - 4K bytes
    - Viewed (0)
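
    atomicwb queues the old and new pointer values in the current P's write barrier buffer before the store. From user code, atomic pointer stores are expressed with sync/atomic; a minimal sketch using atomic.Pointer (the config type is illustrative):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    type config struct {
    	limit int
    }

    func main() {
    	var current atomic.Pointer[config]

    	// Each Store is an atomic pointer write; in the runtime such
    	// stores go through atomicstorep, which invokes the write barrier.
    	current.Store(&config{limit: 10})
    	current.Store(&config{limit: 20})

    	fmt.Println(current.Load().limit) // 20
    }
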
  10. src/runtime/os_linux.go

    	// procid. We need this for asynchronous preemption and it's
    	// useful in debuggers.
    	getg().m.procid = uint64(gettid())
    }
    
    // Called from dropm to undo the effect of an minit.
    //
    //go:nosplit
    func unminit() {
    	unminitSignals()
    	getg().m.procid = 0
    }
    
    // Called from exitm, but not from drop, to undo the effect of thread-owned
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 25.9K bytes
    - Viewed (0)
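
    minit records the OS thread id so the runtime can target the thread for asynchronous preemption. In ordinary Go code on Linux the same id is available through syscall.Gettid; a minimal, Linux-only sketch:

    //go:build linux

    package main

    import (
    	"fmt"
    	"runtime"
    	"syscall"
    )

    func main() {
    	// Pin this goroutine to its OS thread so the tid below stays
    	// meaningful for the duration of the call.
    	runtime.LockOSThread()
    	defer runtime.UnlockOSThread()

    	fmt.Println("thread id:", syscall.Gettid())
    }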