Results 41 - 50 of 59 for Setg (1.89 sec)

  1. src/runtime/preempt.go

    // directly schedule the waiter. The context switch is unavoidable in
    // the signal case.
    //
    //go:systemstack
    func suspendG(gp *g) suspendGState {
    	if mp := getg().m; mp.curg != nil && readgstatus(mp.curg) == _Grunning {
    		// Since we're on the system stack of this M, the user
    		// G is stuck at an unsafe point. If another goroutine
    		// were to try to preempt m.curg, it could deadlock.
    - Last Modified: Fri May 17 15:41:45 UTC 2024
    - 15.1K bytes
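
    Aside: suspendG runs on the system stack and is not callable from user
    code, but its effect is observable. A minimal user-level sketch (not
    from the indexed file; assumes Go 1.14+, where signal-based preemption
    was introduced):

    package main

    import (
    	"fmt"
    	"runtime"
    	"time"
    )

    func main() {
    	runtime.GOMAXPROCS(1)
    	go func() {
    		for {
    			// A tight loop with no calls or allocations: only
    			// asynchronous preemption (coordinated by suspendG)
    			// can take the P back from it.
    		}
    	}()
    	time.Sleep(100 * time.Millisecond)
    	fmt.Println("main was rescheduled despite the busy goroutine")
    }
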
  2. src/runtime/trace.go

    	if raceenabled {
    		// g0 doesn't have a race context. Borrow the user G's.
    		if getg().racectx != 0 {
    			throw("expected racectx == 0")
    		}
    		getg().racectx = getg().m.curg.racectx
    		// (This defer should get open-coded, which is safe on
    		// the system stack.)
    		defer func() { getg().racectx = 0 }()
    	}
    
    	// This function must not allocate while holding trace.lock:
    - Last Modified: Wed May 22 21:17:41 UTC 2024
    - 37.1K bytes
  3. src/runtime/syscall_windows.go

    	// calls back into Go.
    	c := &getg().m.winsyscall
    	c.fn = fn
    	c.n = uintptr(len(args))
    	if c.n != 0 {
    		c.args = uintptr(noescape(unsafe.Pointer(&args[0])))
    	}
    	cgocall(asmstdcallAddr, unsafe.Pointer(c))
    	// cgocall may reschedule us on to a different M,
    	// but it copies the return values into the new M's
    	// winsyscall so we can read them from there.
    	c = &getg().m.winsyscall
    	return c.r1, c.r2, c.err
    - Last Modified: Wed May 22 20:12:46 UTC 2024
    - 16.6K bytes
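
    Aside: this m.winsyscall staging backs syscall.SyscallN, which
    (*syscall.LazyProc).Call uses internally. A Windows-only illustration
    (not from the indexed file):

    //go:build windows

    package main

    import (
    	"fmt"
    	"syscall"
    )

    func main() {
    	kernel32 := syscall.NewLazyDLL("kernel32.dll")
    	getTick := kernel32.NewProc("GetTickCount64")
    	// Call routes through syscall.SyscallN, whose arguments are
    	// staged in getg().m.winsyscall as in the snippet above.
    	ms, _, _ := getTick.Call()
    	fmt.Println("milliseconds since boot:", ms)
    }
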
  4. src/cmd/compile/internal/typecheck/func.go

    	}
    	if t.NumResults() == 1 {
    		n.SetType(l.Type().Result(0).Type)
    
    		if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.ONAME {
    			if sym := n.Fun.(*ir.Name).Sym(); types.RuntimeSymName(sym) == "getg" {
    				// Emit code for runtime.getg() directly instead of calling function.
    				// Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
    - Last Modified: Wed Mar 06 15:23:18 UTC 2024
    - 21.1K bytes
  5. src/runtime/traceruntime.go

    	// buffer flushes are rare. Record the lock edge even if it doesn't happen
    	// this time.
    	lockRankMayTraceFlush()
    
    	// Check if we're already locked. If so, return an invalid traceLocker.
    	if getg().m.trace.seqlock.Load()%2 == 1 {
    		return traceLocker{}
    	}
    	return traceAcquireEnabled()
    }
    
    // ok returns true if the traceLocker is valid (i.e. tracing is enabled).
    //
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 25.7K bytes
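
    Aside: these traceLocker acquisitions only fire while tracing is
    enabled, which user code does through the standard runtime/trace API:

    package main

    import (
    	"log"
    	"os"
    	"runtime/trace"
    )

    func main() {
    	f, err := os.Create("trace.out")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()
    	// While tracing is on, runtime event writers acquire the per-M
    	// seqlock checked in the snippet above.
    	if err := trace.Start(f); err != nil {
    		log.Fatal(err)
    	}
    	defer trace.Stop()
    	// ... traced workload ...
    }
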
  6. src/runtime/runtime.go

    // writeErrData is the common parts of writeErr{,Str}.
    //
    //go:nosplit
    func writeErrData(data *byte, n int32) {
    	write(2, unsafe.Pointer(data), n)
    
    	// If crashing, print a copy to the SetCrashOutput fd.
    	gp := getg()
    	if gp != nil && gp.m.dying > 0 ||
    		gp == nil && panicking.Load() > 0 {
    		if fd := crashFD.Load(); fd != ^uintptr(0) {
    			write(fd, unsafe.Pointer(data), n)
    		}
    	}
    }
    
    - Last Modified: Thu May 23 01:16:47 UTC 2024
    - 9.9K bytes
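
    Aside: the crashFD consulted above is set via
    runtime/debug.SetCrashOutput. A sketch, assuming Go 1.23+:

    package main

    import (
    	"os"
    	"runtime/debug"
    )

    func main() {
    	f, err := os.Create("crash.log")
    	if err != nil {
    		panic(err)
    	}
    	// Fatal-panic reports are duplicated to f by writeErrData above.
    	if err := debug.SetCrashOutput(f, debug.CrashOptions{}); err != nil {
    		panic(err)
    	}
    	panic("boom") // the report lands on stderr and in crash.log
    }
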
  7. src/runtime/netpoll_solaris.go

    	libc_port_associate,
    	libc_port_dissociate,
    	libc_port_getn,
    	libc_port_alert libcFunc
    	netpollWakeSig atomic.Uint32 // used to avoid duplicate calls of netpollBreak
    )
    
    func errno() int32 {
    	return *getg().m.perrno
    }
    
    func port_create() int32 {
    	return int32(sysvicall0(&libc_port_create))
    }
    
    func port_associate(port, source int32, object uintptr, events uint32, user uintptr) int32 {
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 11.2K bytes
  8. src/runtime/arena.go

    	}
    
    	// Prevent preemption as we set up the space for a new object.
    	//
    	// Act like we're allocating.
    	mp := acquirem()
    	if mp.mallocing != 0 {
    		throw("malloc deadlock")
    	}
    	if mp.gsignal == getg() {
    		throw("malloc during signal")
    	}
    	mp.mallocing = 1
    
    	var ptr unsafe.Pointer
    	if !typ.Pointers() {
    		// Allocate pointer-less objects from the tail end of the chunk.
    - Last Modified: Wed May 08 17:44:56 UTC 2024
    - 37.9K bytes
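
    Aside: user code reaches this allocation path only through the
    experimental arena package (requires GOEXPERIMENT=arenas and may
    change or be removed). A sketch under that assumption:

    //go:build goexperiment.arenas

    package main

    import (
    	"arena"
    	"fmt"
    )

    type point struct{ x, y int }

    func main() {
    	a := arena.NewArena()
    	defer a.Free()
    	// arena.New carves the object out of a chunk via the
    	// preemption-disabled path shown above.
    	p := arena.New[point](a)
    	p.x, p.y = 3, 4
    	fmt.Println(*p)
    }
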
  9. src/runtime/chan.go

    		}
    		c.qcount++
    		unlock(&c.lock)
    		return true
    	}
    
    	if !block {
    		unlock(&c.lock)
    		return false
    	}
    
    	// Block on the channel. Some receiver will complete our operation for us.
    	gp := getg()
    	mysg := acquireSudog()
    	mysg.releasetime = 0
    	if t0 != 0 {
    		mysg.releasetime = -1
    	}
    	// No stack splits between assigning elem and enqueuing mysg
    	// on gp.waiting where copystack can find it.
    - Last Modified: Thu May 23 01:16:50 UTC 2024
    - 25.9K bytes
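
    Aside: the sudog parking above is what an ordinary blocking send
    compiles down to. For example:

    package main

    import "fmt"

    func main() {
    	c := make(chan int) // unbuffered, so every send blocks
    	go func() {
    		// The sender parks on a sudog (mysg above) until a
    		// receiver completes the operation for it.
    		c <- 42
    	}()
    	fmt.Println(<-c)
    }
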
  10. src/runtime/mstats.go

    //
    // nosplit because a stack growth in this function could
    // lead to a stack allocation that could reenter the
    // function.
    //
    //go:nosplit
    func (m *consistentHeapStats) acquire() *heapStatsDelta {
    	if pp := getg().m.p.ptr(); pp != nil {
    		seq := pp.statsSeq.Add(1)
    		if seq%2 == 0 {
    			// Should have been incremented to odd.
    			print("runtime: seq=", seq, "\n")
    			throw("bad sequence number")
    		}
    	} else {
    - Last Modified: Mon Apr 08 21:03:13 UTC 2024
    - 34.2K bytes
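
    Aside: these sequence-counted stats surface to user code through
    runtime/metrics, whose readers wait out the even/odd statsSeq protocol
    shown above. A sketch (metric name as documented in runtime/metrics):

    package main

    import (
    	"fmt"
    	"runtime/metrics"
    )

    func main() {
    	s := []metrics.Sample{{Name: "/memory/classes/heap/objects:bytes"}}
    	// Read takes a consistent snapshot of the heap stats deltas
    	// accumulated by acquire/release in the snippet above.
    	metrics.Read(s)
    	fmt.Println("live heap bytes:", s[0].Value.Uint64())
    }
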