Results 71 - 80 of 137 for Setg (0.06 sec)

  1. src/runtime/coro.go

    // and then calls coroexit to remove the extra concurrency.
    func corostart() {
    	gp := getg()
    	c := gp.coroarg
    	gp.coroarg = nil
    
    	defer coroexit(c)
    	c.f(c)
    }
    
    // coroexit is like coroswitch but closes the coro
    // and exits the current goroutine
    func coroexit(c *coro) {
    	gp := getg()
    	gp.coroarg = c
    	gp.coroexit = true
    	mcall(coroswitch_m)
    }
    
    //go:linkname coroswitch
    
    - Last Modified: Fri Jun 07 19:09:18 UTC 2024
    - 7.4K bytes
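
    The coro machinery excerpted here is the runtime side of the standard
    library's pull-style iterator adapter: iter.Pull runs the sequence on a
    coroutine whose entry point is corostart, and stopping the iterator is
    what eventually reaches the deferred coroexit. A minimal user-level
    sketch, assuming Go 1.23+ (this linkage to iter.Pull is inferred from
    the file, not stated in the excerpt):

    package main

    import (
        "fmt"
        "iter"
    )

    func main() {
        // The sequence below runs on its own runtime-managed coroutine;
        // stop() winds it down.
        var seq iter.Seq[int] = func(yield func(int) bool) {
            for i := 1; i <= 3; i++ {
                if !yield(i) {
                    return
                }
            }
        }
        next, stop := iter.Pull(seq)
        defer stop()
        for v, ok := next(); ok; v, ok = next() {
            fmt.Println(v)
        }
    }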
  2. src/runtime/os_openbsd.go

    // Called to initialize a new m (including the bootstrap m).
    // Called on the new thread, can not allocate memory.
    func minit() {
    	getg().m.procid = uint64(getthrid())
    	minitSignals()
    }
    
    // Called from dropm to undo the effect of an minit.
    //
    //go:nosplit
    func unminit() {
    	unminitSignals()
    	getg().m.procid = 0
    }
    
    // Called from exitm, but not from dropm, to undo the effect of thread-owned
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 6.2K bytes
  3. src/runtime/HACKING.md

    so their memory remains type stable. As a result, the runtime can
    avoid write barriers in the depths of the scheduler.
    
    `getg()` and `getg().m.curg`
    ----------------------------
    
    To get the current user `g`, use `getg().m.curg`.
    
    `getg()` alone returns the current `g`, but when executing on the
    system or signal stacks, this will return the current M's "g0" or
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 13.9K bytes
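
    The distinction described above matters only inside the runtime: getg is
    a compiler intrinsic, and g and m are runtime-internal types, so the
    following is an illustrative sketch that compiles only within package
    runtime (the helper name is invented here):

    // currentUserG returns the user goroutine even when called on the
    // system (g0) or signal (gsignal) stack.
    func currentUserG() *g {
        gp := getg()
        if gp == gp.m.curg {
            return gp // already executing on the user goroutine
        }
        // On g0 or gsignal: reach the user g through the current M.
        return gp.m.curg
    }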
  4. src/runtime/debug.go

    //
    // Ensure mayMoreStackPreempt can be called for all ABIs.
    //
    //go:nosplit
    //go:linkname mayMoreStackPreempt
    func mayMoreStackPreempt() {
    	// Don't do anything on the g0 or gsignal stack.
    	gp := getg()
    	if gp == gp.m.g0 || gp == gp.m.gsignal {
    		return
    	}
    	// Force a preemption, unless the stack is already poisoned.
    	if gp.stackguard0 < stackPoisonMin {
    		gp.stackguard0 = stackPreempt
    	}
    }
    
    - Last Modified: Sat May 11 20:38:24 UTC 2024
    - 4.2K bytes
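
    mayMoreStackPreempt is not reached in ordinary builds: as the surrounding
    file documents, it is installed as a "maymorestack" hook through the
    compiler debug flag (roughly -gcflags='all=-d=maymorestack=runtime.mayMoreStackPreempt'),
    which makes every stack-check prologue also force a cooperative preemption
    check unless the stack guard is already poisoned.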
  5. src/runtime/tracestack.go

    		// symbolizer.
    		pcBuf[0] = logicalStackSentinel
    		if getg() == gp {
    			nstk += callers(skip+1, pcBuf[1:])
    		} else if gp != nil {
    			nstk += gcallers(gp, skip, pcBuf[1:])
    		}
    	} else {
    		// Fast path: Unwind using frame pointers.
    		pcBuf[0] = uintptr(skip)
    		if getg() == gp {
    			nstk += fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf[1:])
    		} else if gp != nil {
    			// Three cases:
    - Last Modified: Tue May 21 14:38:56 UTC 2024
    - 11K bytes
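
    These stack-capture paths only run while the execution tracer is active.
    A minimal sketch of enabling the tracer from user code with runtime/trace,
    after which trace events carry stacks collected by code like the excerpt
    above (the workload and file name are placeholders):

    package main

    import (
        "os"
        "runtime/trace"
    )

    func main() {
        // The written trace can be inspected with: go tool trace trace.out
        f, err := os.Create("trace.out")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        if err := trace.Start(f); err != nil {
            panic(err)
        }
        defer trace.Stop()

        // ... workload to be traced ...
    }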
  6. src/runtime/os_darwin.go

    		minitSignalStack()
    	}
    	minitSignalMask()
    	getg().m.procid = uint64(pthread_self())
    }
    
    // Called from dropm to undo the effect of an minit.
    //
    //go:nosplit
    func unminit() {
    	// iOS does not support alternate signal stack.
    	// See minit.
    	if !(GOOS == "ios" && GOARCH == "arm64") {
    		unminitSignals()
    	}
    	getg().m.procid = 0
    }
    
    - Last Modified: Tue Dec 05 20:34:30 UTC 2023
    - 11.9K bytes
  7. src/runtime/os3_plan9.go

    	"internal/goarch"
    	"internal/stringslite"
    	"unsafe"
    )
    
    // May run during STW, so write barriers are not allowed.
    //
    //go:nowritebarrierrec
    func sighandler(_ureg *ureg, note *byte, gp *g) int {
    	gsignal := getg()
    	mp := gsignal.m
    
    	var t sigTabT
    	var docrash bool
    	var sig int
    	var flags int
    	var level int32
    
    	c := &sigctxt{_ureg}
    	notestr := gostringnocopy(note)
    
    - Last Modified: Fri May 17 15:41:45 UTC 2024
    - 4K bytes
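
    The handler runs on the signal-handling goroutine, which is why the
    excerpt binds getg() to a variable named gsignal. User programs never
    interact with this layer directly; they observe signals (notes, on
    Plan 9) through os/signal. A minimal cross-platform sketch:

    package main

    import (
        "fmt"
        "os"
        "os/signal"
    )

    func main() {
        // The per-OS runtime sighandler, as in the excerpt above, is what
        // ultimately forwards signals to channels registered here.
        c := make(chan os.Signal, 1)
        signal.Notify(c, os.Interrupt)
        fmt.Println("received", <-c)
    }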
  8. src/runtime/panic.go

    		// Note: wasm can't tail call, so we can't get the original caller's pc.
    		throw(msg)
    	}
    	// TODO: is this redundant? How could we be in malloc
    	// but not in the runtime? runtime/internal/*, maybe?
    	gp := getg()
    	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
    		throw(msg)
    	}
    }
    
    // Same as above, but calling from the runtime is allowed.
    //
    // Using this function is necessary for any panic that may be
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 43.8K bytes
  9. src/runtime/msan.go

    // anyhow for values on the stack. Just ignore msanread when running
    // on the system stack. The other msan functions are fine.
    //
    //go:linkname msanread
    //go:nosplit
    func msanread(addr unsafe.Pointer, sz uintptr) {
    	gp := getg()
    	if gp == nil || gp.m == nil || gp == gp.m.g0 || gp == gp.m.gsignal {
    		return
    	}
    	domsanread(addr, sz)
    }
    
    //go:noescape
    func domsanread(addr unsafe.Pointer, sz uintptr)
    
    //go:linkname msanwrite
    - Last Modified: Tue Feb 20 20:50:21 UTC 2024
    - 1.6K bytes
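
    These hooks are only compiled in when memory-sanitizer support is enabled
    (go build -msan, which needs a MemorySanitizer-capable C toolchain such as
    clang); in ordinary builds msanread is not linked in, so the getg-based
    system-stack check above never runs.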
  10. src/runtime/debug_test.go

    		debug.SetGCPercent(ogcpercent)
    	}
    }
    
    func debugCallWorker(ready chan<- *runtime.G, stop *uint32, done chan<- error) {
    	runtime.LockOSThread()
    	defer runtime.UnlockOSThread()
    
    	ready <- runtime.Getg()
    
    	x := 2
    	debugCallWorker2(stop, &x)
    	if x != 1 {
    		done <- fmt.Errorf("want x = 2, got %d; register pointer not adjusted?", x)
    	}
    	close(done)
    }
    
    - Last Modified: Fri Sep 08 15:08:04 UTC 2023
    - 8K bytes
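
    The restore of debug.SetGCPercent(ogcpercent) at the top of the excerpt
    works because SetGCPercent returns the previous setting, which is the
    usual way a value like ogcpercent is captured. A minimal sketch of that
    save-and-restore pattern (the helper name is invented here):

    package main

    import "runtime/debug"

    // withGCPercent runs f with a temporary GC target, then restores the
    // previous value, mirroring the restore in the test excerpt above.
    func withGCPercent(pct int, f func()) {
        old := debug.SetGCPercent(pct)
        defer debug.SetGCPercent(old)
        f()
    }

    func main() {
        withGCPercent(50, func() { /* allocation-heavy work */ })
    }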