Results 41 - 50 of 50 for m_gsignal (0.15 sec)

  1. src/runtime/race.go

    }
    
    //go:nosplit
    func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
    	gp := getg()
    	if gp != gp.m.curg {
    		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
    		// Not interesting.
    		return
    	}
    	if callpc != 0 {
    		racefuncenter(callpc)
    	}
    	racewriterangepc1(uintptr(addr), sz, pc)
    	if callpc != 0 {
    		racefuncexit()
    	}
    }
    
    //go:nosplit
    - Last Modified: Fri May 17 18:37:29 UTC 2024
    - 20.4K bytes
  2. src/runtime/cgocheck.go

    	}
    	if cgoIsGoPointer(unsafe.Pointer(dst)) {
    		return
    	}
    
    	// If we are running on the system stack then dst might be an
    	// address on the stack, which is OK.
    	gp := getg()
    	if gp == gp.m.g0 || gp == gp.m.gsignal {
    		return
    	}
    
    	// Allocating memory can write to various mfixalloc structs
    	// that look like they are non-Go memory.
    	if gp.m.mallocing != 0 {
    		return
    	}
    
    - Last Modified: Tue Apr 09 04:07:57 UTC 2024
    - 7.6K bytes
  3. src/runtime/stubs.go

    // fn must not return at all; typically it ends by calling schedule, to let the m
    // run other goroutines.
    //
    // mcall can only be called from g stacks (not g0, not gsignal).
    //
    // This must NOT be go:noescape: if fn is a stack-allocated closure,
    // fn puts g on a run queue, and g executes before fn returns, the
    // closure will be invalidated while it is still executing.
    func mcall(fn func(*g))
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 20.2K bytes
  4. src/runtime/os_linux.go

    }
    
    // Called to initialize a new m (including the bootstrap m).
    // Called on the parent thread (main thread in case of bootstrap), can allocate memory.
    func mpreinit(mp *m) {
    	mp.gsignal = malg(32 * 1024) // Linux wants >= 2K
    	mp.gsignal.m = mp
    }
    
    func gettid() uint32
    
    // Called to initialize a new m (including the bootstrap m).
    // Called on the new thread, cannot allocate memory.
    func minit() {
    	minitSignals()
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 25.9K bytes
  5. src/runtime/proc.go

    	}
    
    	sigblock(true)
    	unminit()
    
    	// Free the gsignal stack.
    	if mp.gsignal != nil {
    		stackfree(mp.gsignal.stack)
    		// On some platforms, when calling into VDSO (e.g. nanotime)
    		// we store our g on the gsignal stack, if there is one.
    		// Now the stack is freed, unlink it from the m, so we
    		// won't write to it when calling VDSO code.
    		mp.gsignal = nil
    	}
    
    	// Remove m from allm.
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 207.5K bytes
  6. src/runtime/stack.go

    	}
    	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
    		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
    		morebuf := thisg.m.morebuf
    		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
    		throw("runtime: wrong goroutine in newstack")
    	}
    
    	gp := thisg.m.curg
    
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 41.1K bytes
  7. src/runtime/arena.go

    	}
    
    	// Prevent preemption as we set up the space for a new object.
    	//
    	// Act like we're allocating.
    	mp := acquirem()
    	if mp.mallocing != 0 {
    		throw("malloc deadlock")
    	}
    	if mp.gsignal == getg() {
    		throw("malloc during signal")
    	}
    	mp.mallocing = 1
    
    	var ptr unsafe.Pointer
    	if !typ.Pointers() {
    		// Allocate pointer-less objects from the tail end of the chunk.
    - Last Modified: Wed May 08 17:44:56 UTC 2024
    - 37.9K bytes
  8. src/runtime/runtime2.go

    	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
    	// stackguard1 is the stack pointer compared in the //go:systemstack stack growth prologue.
    	// It is stack.lo+StackGuard on g0 and gsignal stacks.
    	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
    	stack       stack   // offset known to runtime/cgo
    	stackguard0 uintptr // offset known to liblink
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 47.9K bytes
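The stackguard0 comment in result 8 describes the bound that each function's stack-growth prologue compares the stack pointer against. Below is a minimal, standalone sketch of that comparison; the guard size is illustrative only, and the real constant (as well as the StackPreempt sentinel used to force preemption) varies by Go version and platform.

    package main
    
    import "fmt"
    
    // Illustrative guard size; not the runtime's actual StackGuard value.
    const stackGuardBytes = 928
    
    // needsMoreStack mimics the prologue test: the stack grows down, so if
    // sp has descended below stack.lo+StackGuard, the function must call
    // into the stack-growth path before using more frame space.
    func needsMoreStack(sp, stackLo uintptr) bool {
    	stackguard0 := stackLo + stackGuardBytes
    	return sp < stackguard0
    }
    
    func main() {
    	fmt.Println(needsMoreStack(0x100200, 0x100000)) // true: inside the guard zone
    	fmt.Println(needsMoreStack(0x101000, 0x100000)) // false: plenty of room left
    }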
  9. src/runtime/malloc.go

    	assistG := deductAssistCredit(size)
    
    	// Set mp.mallocing to keep from being preempted by GC.
    	mp := acquirem()
    	if mp.mallocing != 0 {
    		throw("malloc deadlock")
    	}
    	if mp.gsignal == getg() {
    		throw("malloc during signal")
    	}
    	mp.mallocing = 1
    
    	shouldhelpgc := false
    	dataSize := userSize
    	c := getMCache(mp)
    	if c == nil {
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 59.6K bytes
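Results 7 and 9 show the same guard: mp.mallocing acts as a reentrancy flag, so an allocation started while another allocation is already in progress on the same m (or one attempted while running on the gsignal stack) is reported immediately instead of deadlocking. Below is a standalone sketch of that flag pattern using stand-in types; the real runtime pins the m with acquirem/releasem and reports the error with throw.

    package main
    
    import "fmt"
    
    // mLike stands in for the runtime's per-thread m; only the reentrancy
    // flag is modeled here.
    type mLike struct {
    	mallocing int32
    }
    
    // alloc sketches the guard at the top of mallocgc: a nested allocation
    // on the same m would deadlock, so it panics right away.
    func alloc(mp *mLike, size int) []byte {
    	if mp.mallocing != 0 {
    		panic("malloc deadlock")
    	}
    	mp.mallocing = 1
    	defer func() { mp.mallocing = 0 }()
    	return make([]byte, size)
    }
    
    func main() {
    	mp := &mLike{}
    	fmt.Println(len(alloc(mp, 16))) // 16
    }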
  10. src/runtime/os_windows.go

    	sigprof(c.ip(), c.sp(), c.lr(), gp, mp)
    }
    
    func gFromSP(mp *m, sp uintptr) *g {
    	if gp := mp.g0; gp != nil && gp.stack.lo < sp && sp < gp.stack.hi {
    		return gp
    	}
    	if gp := mp.gsignal; gp != nil && gp.stack.lo < sp && sp < gp.stack.hi {
    		return gp
    	}
    	if gp := mp.curg; gp != nil && gp.stack.lo < sp && sp < gp.stack.hi {
    		return gp
    	}
    	return nil
    }
    
    func profileLoop() {
    - Last Modified: Fri Apr 26 22:55:25 UTC 2024
    - 41.5K bytes
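gFromSP in result 10 decides which of an m's goroutines (g0, gsignal, or curg) a saved stack pointer belongs to by a simple bounds check against each stack. A minimal standalone illustration of that containment test, with stand-in types:

    package main
    
    import "fmt"
    
    // stackBounds mirrors the runtime's stack struct: lo and hi bound the stack.
    type stackBounds struct{ lo, hi uintptr }
    
    // inStack reproduces the lo < sp && sp < hi test that gFromSP applies to
    // each candidate goroutine's stack in turn.
    func inStack(s stackBounds, sp uintptr) bool {
    	return s.lo < sp && sp < s.hi
    }
    
    func main() {
    	g0 := stackBounds{lo: 0x1000, hi: 0x3000}
    	gsignal := stackBounds{lo: 0x4000, hi: 0x5000}
    	sp := uintptr(0x4800)
    	fmt.Println(inStack(g0, sp), inStack(gsignal, sp)) // false true
    }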