Results 1 - 10 of 226 for getg (0.06 sec)

  1. src/runtime/debugcall.go

    // explaining why.
    //
    //go:nosplit
    func debugCallCheck(pc uintptr) string {
    	// No user calls from the system stack.
    	if getg() != getg().m.curg {
    		return debugCallSystemStack
    	}
    	if sp := getcallersp(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
    		// Fast syscalls (nanotime) and racecall switch to the
    		// g0 stack without switching g. We can't safely make
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Apr 05 20:50:21 UTC 2024
    - 7.1K bytes
    - Viewed (0)
  2. src/runtime/lock_futex.go

    }
    
    func notetsleep(n *note, ns int64) bool {
    	gp := getg()
    	if gp != gp.m.g0 && gp.m.preemptoff != "" {
    		throw("notetsleep not on g0")
    	}
    
    	return notetsleep_internal(n, ns)
    }
    
    // same as runtime·notetsleep, but called on user g (not g0)
    // calls only nosplit functions between entersyscallblock/exitsyscall.
    func notetsleepg(n *note, ns int64) bool {
    	gp := getg()
    	if gp == gp.m.g0 {
    		throw("notetsleepg on g0")
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:34 UTC 2024
    - 5.4K bytes
    - Viewed (0)
  3. src/runtime/lock_sema.go

    }
    
    func notetsleep(n *note, ns int64) bool {
    	gp := getg()
    	if gp != gp.m.g0 {
    		throw("notetsleep not on g0")
    	}
    	semacreate(gp.m)
    	return notetsleep_internal(n, ns, nil, 0)
    }
    
    // same as runtime·notetsleep, but called on user g (not g0)
    // calls only nosplit functions between entersyscallblock/exitsyscall.
    func notetsleepg(n *note, ns int64) bool {
    	gp := getg()
    	if gp == gp.m.g0 {
    		throw("notetsleepg on g0")
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 6.8K bytes
    - Viewed (0)
  4. src/runtime/proflabel.go

    //
    //go:linkname runtime_setProfLabel runtime/pprof.runtime_setProfLabel
    func runtime_setProfLabel(labels unsafe.Pointer) {
    	// Introduce race edge for read-back via profile.
    	// This would more properly use &getg().labels as the sync address,
    	// but we do the read in a signal handler and can't call the race runtime then.
    	//
    	// This uses racereleasemerge rather than just racerelease so
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 16:25:21 UTC 2024
    - 2.1K bytes
    - Viewed (0)
  5. src/runtime/lock_js.go

    		// observe this.
    		throw("self deadlock")
    	}
    	gp := getg()
    	if gp.m.locks < 0 {
    		throw("lock count")
    	}
    	gp.m.locks++
    	l.key = mutex_locked
    }
    
    func unlock(l *mutex) {
    	unlockWithRank(l)
    }
    
    func unlock2(l *mutex) {
    	if l.key == mutex_unlocked {
    		throw("unlock of unlocked lock")
    	}
    	gp := getg()
    	gp.m.locks--
    	if gp.m.locks < 0 {
    		throw("lock count")
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Nov 21 21:02:20 UTC 2023
    - 7.3K bytes
    - Viewed (0)
  6. src/runtime/mwbbuf.go

    func (b *wbBuf) empty() bool {
    	return b.next == uintptr(unsafe.Pointer(&b.buf[0]))
    }
    
    // getX returns space in the write barrier buffer to store X pointers.
    // getX will flush the buffer if necessary. Callers should use this as:
    //
    //	buf := &getg().m.p.ptr().wbBuf
    //	p := buf.get2()
    //	p[0], p[1] = old, new
    //	... actual memory write ...
    //
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 8.1K bytes
    - Viewed (0)
  7. src/runtime/os_solaris.go

    func asmsysvicall6() // declared for vet; do NOT call
    
    //go:nosplit
    func sysvicall0(fn *libcFunc) uintptr {
    	// Leave caller's PC/SP around for traceback.
    	gp := getg()
    	var mp *m
    	if gp != nil {
    		mp = gp.m
    	}
    	if mp != nil && mp.libcallsp == 0 {
    		mp.libcallg.set(gp)
    		mp.libcallpc = getcallerpc()
    		// sp must be the last, because once async cpu profiler finds
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Jun 06 18:49:01 UTC 2023
    - 6.6K bytes
    - Viewed (0)
  8. src/runtime/lock_wasip1.go

    		// observe this.
    		throw("self deadlock")
    	}
    	gp := getg()
    	if gp.m.locks < 0 {
    		throw("lock count")
    	}
    	gp.m.locks++
    	l.key = mutex_locked
    }
    
    func unlock(l *mutex) {
    	unlockWithRank(l)
    }
    
    func unlock2(l *mutex) {
    	if l.key == mutex_unlocked {
    		throw("unlock of unlocked lock")
    	}
    	gp := getg()
    	gp.m.locks--
    	if gp.m.locks < 0 {
    		throw("lock count")
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Nov 21 21:02:20 UTC 2023
    - 2K bytes
    - Viewed (0)
  9. src/runtime/os_aix.go

    // This will return a pointer to errno.
    func miniterrno() {
    	mp := getg().m
    	r, _ := syscall0(&libc__Errno)
    	mp.perrno = r
    
    }
    
    func minit() {
    	miniterrno()
    	minitSignals()
    	getg().m.procid = uint64(pthread_self())
    }
    
    func unminit() {
    	unminitSignals()
    	getg().m.procid = 0
    }
    
    // Called from exitm, but not from drop, to undo the effect of thread-owned
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 8.9K bytes
    - Viewed (0)
  10. src/runtime/export_debug_test.go

    	// a signal handler. Add the go:nowritebarrierrec annotation and restructure
    	// this to avoid write barriers.
    
    	switch h.gp.atomicstatus.Load() {
    	case _Grunning:
    		if getg().m != h.mp {
    			println("trap on wrong M", getg().m, h.mp)
    			return false
    		}
    		// Save the signal context
    		h.saveSigContext(ctxt)
    		// Set PC to debugCallV2.
    		ctxt.setsigpc(uint64(abi.FuncPCABIInternal(debugCallV2)))
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 15:41:45 UTC 2024
    - 5.1K bytes
    - Viewed (0)
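
The excerpt in result 6 (src/runtime/mwbbuf.go) is the one hit above that spells out a usage pattern rather than a single call site: its doc comment documents the calling convention for the write barrier buffer's getX helpers. The sketch below only expands that quoted pattern with comments. It is runtime-internal code: getg() (which returns the current goroutine's g), wbBuf, and get2 exist only inside package runtime, and slot, old, and new are hypothetical names standing in for the pointer slot being written and its old and new values, so this is illustrative rather than compilable on its own.

    // Caller pattern from the mwbbuf.go doc comment (runtime-internal sketch).
    buf := &getg().m.p.ptr().wbBuf // per-P write barrier buffer, reached via the current g's M and P
    p := buf.get2()                // reserve space for two pointers; get2 flushes the buffer if necessary
    p[0], p[1] = old, new          // record the overwritten pointer and the pointer being installed
    *slot = new                    // the actual memory write that the barrier accompanies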