Results 51 - 59 of 59 for getg (0.05 sec)

  1. src/runtime/select.go

    			}
    			if c.qcount < c.dataqsiz {
    				goto bufsend
    			}
    		}
    	}
    
    	if !block {
    		selunlock(scases, lockorder)
    		casi = -1
    		goto retc
    	}
    
    	// pass 2 - enqueue on all chans
    	gp = getg()
    	if gp.waiting != nil {
    		throw("gp.waiting != nil")
    	}
    	nextp = &gp.waiting
    	for _, casei := range lockorder {
    		casi = int(casei)
    		cas = &scases[casi]
    		c = cas.c
    		sg := acquireSudog()
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Mar 13 21:36:04 UTC 2024
    - 15K bytes
    - Viewed (0)
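
    The snippet above is from the runtime's select implementation: a buffered channel with free capacity (c.qcount < c.dataqsiz) is served immediately, a non-blocking select (block == false) returns instead of parking, and only otherwise does the goroutine enqueue a sudog on every channel via getg().waiting. A minimal user-level sketch of the first two paths, ordinary channel code rather than runtime internals:

    package main

    import "fmt"

    func main() {
        ch1 := make(chan int, 1) // buffered with free capacity, like the c.qcount < c.dataqsiz check
        ch2 := make(chan int)

        // A default case makes the select non-blocking: if nothing is ready,
        // the runtime takes the early `if !block` return instead of
        // enqueueing this goroutine on every channel.
        select {
        case ch1 <- 1:
            fmt.Println("sent on buffered ch1")
        case v := <-ch2:
            fmt.Println("received", v)
        default:
            fmt.Println("nothing ready; returned without blocking")
        }
    }
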
  2. src/internal/trace/trace_test.go

    		}
    
    		// Examine the execution tracer's view of the CPU profile samples. Filter it
    		// to only include samples from the single test goroutine. Use the goroutine
    		// ID that was recorded in the events: that should reflect getg().m.curg,
    		// same as the profiler's labels (even when the M is using its g0 stack).
    		totalTraceSamples := 0
    		traceSamples := 0
    		traceStacks := make(map[string]int)
    		r, err := trace.NewReader(bytes.NewReader(tb))
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 18:48:18 UTC 2024
    - 18.5K bytes
    - Viewed (0)
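
    The test this snippet comes from checks that CPU profile samples taken while the execution tracer is running also appear in the trace, attributed to the goroutine reported by getg().m.curg. A minimal sketch of collecting both at once from user code (the out.trace name is arbitrary; parsing the trace the way the test does requires the internal trace reader):

    package main

    import (
        "bytes"
        "log"
        "os"
        "runtime/pprof"
        "runtime/trace"
    )

    func main() {
        var traceBuf, profBuf bytes.Buffer

        // Run the execution tracer and the CPU profiler at the same time;
        // while both are active the tracer also records the CPU samples.
        if err := trace.Start(&traceBuf); err != nil {
            log.Fatal(err)
        }
        if err := pprof.StartCPUProfile(&profBuf); err != nil {
            log.Fatal(err)
        }

        // Burn some CPU so both collectors have samples to record.
        x := 0
        for i := 0; i < 100_000_000; i++ {
            x += i % 7
        }
        _ = x

        pprof.StopCPUProfile()
        trace.Stop()

        if err := os.WriteFile("out.trace", traceBuf.Bytes(), 0o644); err != nil {
            log.Fatal(err)
        }
    }
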
  3. src/runtime/string.go

    }
    
    // stringDataOnStack reports whether the string's data is
    // stored on the current goroutine's stack.
    func stringDataOnStack(s string) bool {
    	ptr := uintptr(unsafe.Pointer(unsafe.StringData(s)))
    	stk := getg().stack
    	return stk.lo <= ptr && ptr < stk.hi
    }
    
    func rawstringtmp(buf *tmpBuf, l int) (s string, b []byte) {
    	if buf != nil && l <= len(buf) {
    		b = buf[:l]
    		s = slicebytetostringtmp(&b[0], len(b))
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 01:17:26 UTC 2024
    - 13.4K bytes
    - Viewed (0)
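
    stringDataOnStack above reduces to a half-open range test against the goroutine's stack bounds from getg().stack, which user code cannot reach. A small sketch of just that bounds check, with made-up addresses:

    package main

    import "fmt"

    // within reports whether ptr lies in the half-open interval [lo, hi),
    // the same test stringDataOnStack applies to the stack bounds.
    func within(ptr, lo, hi uintptr) bool {
        return lo <= ptr && ptr < hi
    }

    func main() {
        fmt.Println(within(0x1080, 0x1000, 0x2000)) // true
        fmt.Println(within(0x2000, 0x1000, 0x2000)) // false: hi is exclusive
    }
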
  4. src/runtime/debuglog.go

    	}
    
    	// Reserve space for framing header.
    	l.w.ensure(debugLogHeaderSize)
    	l.w.write += debugLogHeaderSize
    
    	// Write record header.
    	l.w.uvarint(tick - l.w.tick)
    	l.w.uvarint(nano - l.w.nano)
    	gp := getg()
    	if gp != nil && gp.m != nil && gp.m.p != 0 {
    		l.w.varint(int64(gp.m.p.ptr().id))
    	} else {
    		l.w.varint(-1)
    	}
    
    	return l
    }
    
    // A dlogger writes to the debug log.
    //
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Apr 02 15:10:48 UTC 2024
    - 18.3K bytes
    - Viewed (0)
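
    The record header here encodes the tick and nanosecond timestamps as unsigned varint deltas from the previous record, and the P id as a signed varint, writing -1 when the writer has no P. A hedged sketch of the same delta-plus-varint framing idea using encoding/binary; the field layout is illustrative, not the debug log's actual format:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // appendHeader writes timestamps as uvarint deltas and the P id as a
    // signed varint, mirroring the shape of the record header above.
    func appendHeader(buf []byte, tick, prevTick, nano, prevNano uint64, pid int64) []byte {
        buf = binary.AppendUvarint(buf, tick-prevTick)
        buf = binary.AppendUvarint(buf, nano-prevNano)
        buf = binary.AppendVarint(buf, pid) // -1 means "no P", as in the snippet
        return buf
    }

    func main() {
        hdr := appendHeader(nil, 1_000_050, 1_000_000, 2_000_300, 2_000_000, -1)
        fmt.Printf("% x\n", hdr) // deltas 50 and 300, then the varint for -1
    }
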
  5. src/runtime/heapdump.go

    	dumpgs()
    	dumpms()
    	dumproots()
    	dumpmemstats(m)
    	dumpmemprof()
    	dumpint(tagEOF)
    	flush()
    }
    
    func writeheapdump_m(fd uintptr, m *MemStats) {
    	assertWorldStopped()
    
    	gp := getg()
    	casGToWaiting(gp.m.curg, _Grunning, waitReasonDumpingHeap)
    
    	// Set dump file.
    	dumpfd = fd
    
    	// Call dump routine.
    	mdump(m)
    
    	// Reset dump file.
    	dumpfd = 0
    	if tmpbuf != nil {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Apr 09 04:07:57 UTC 2024
    - 17.6K bytes
    - Viewed (0)
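
    writeheapdump_m is reached from the exported runtime/debug.WriteHeapDump, which stops the world (hence assertWorldStopped and the switch to waitReasonDumpingHeap) while the dump is written to the given file descriptor. A minimal caller; heap.dump is an arbitrary output name:

    package main

    import (
        "log"
        "os"
        "runtime/debug"
    )

    func main() {
        f, err := os.Create("heap.dump")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        // Stops the world and writes the heap dump to f's descriptor.
        debug.WriteHeapDump(f.Fd())
    }
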
  6. src/cmd/compile/internal/ssa/_gen/genericOps.go

    	{name: "NilCheck", argLength: 2, nilCheck: true},     // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns the ptr unmodified.
    
    	// Pseudo-ops
    	{name: "GetG", argLength: 1, zeroWidth: true}, // runtime.getg() (read g pointer). arg0=mem
    	{name: "GetClosurePtr"},                       // get closure pointer from dedicated register
    	{name: "GetCallerPC"},                         // for getcallerpc intrinsic
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 15:49:20 UTC 2024
    - 42.6K bytes
    - Viewed (0)
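
    genericOps.go is a declarative table that a generator turns into the compiler's real op definitions; GetG is the pseudo-op behind the runtime getg intrinsic. A stripped-down sketch of the same table-of-struct-literals pattern, using only the fields visible in the snippet (a simplified stand-in, not the generator's actual types):

    package main

    import "fmt"

    // opData is a reduced echo of the generator's op description struct.
    type opData struct {
        name      string
        argLength int32
        nilCheck  bool
        zeroWidth bool
    }

    var genericOps = []opData{
        {name: "NilCheck", argLength: 2, nilCheck: true}, // arg0=ptr, arg1=mem
        {name: "GetG", argLength: 1, zeroWidth: true},    // read the g pointer; arg0=mem
        {name: "GetClosurePtr"},
        {name: "GetCallerPC"},
    }

    func main() {
        for _, op := range genericOps {
            fmt.Printf("%-14s args=%d\n", op.name, op.argLength)
        }
    }
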
  7. src/runtime/mfinal.go

    	return true
    }
    
    // This is the goroutine that runs all of the finalizers.
    func runfinq() {
    	var (
    		frame    unsafe.Pointer
    		framecap uintptr
    		argRegs  int
    	)
    
    	gp := getg()
    	lock(&finlock)
    	fing = gp
    	unlock(&finlock)
    
    	for {
    		lock(&finlock)
    		fb := finq
    		finq = nil
    		if fb == nil {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Jun 07 01:56:56 UTC 2024
    - 19K bytes
    - Viewed (0)
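
    runfinq is the single goroutine that drains finq and executes every finalizer registered through the public API. A minimal user-level example whose callback ends up running on that goroutine; finalizers are not guaranteed to run promptly, hence the timeout:

    package main

    import (
        "fmt"
        "runtime"
        "time"
    )

    type resource struct{ name string }

    func main() {
        done := make(chan struct{})

        r := &resource{name: "demo"}
        // The function registered here is eventually invoked by the
        // finalizer goroutine implemented by runfinq.
        runtime.SetFinalizer(r, func(r *resource) {
            fmt.Println("finalized", r.name)
            close(done)
        })

        r = nil // drop the only reference
        runtime.GC()

        select {
        case <-done:
        case <-time.After(time.Second):
            fmt.Println("finalizer has not run yet")
        }
    }
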
  8. src/runtime/iface.go

    func itabAdd(m *itab) {
    	// Bugs can lead to calling this while mallocing is set,
    	// typically because this is called while panicking.
    	// Crash reliably, rather than only when we need to grow
    	// the hash table.
    	if getg().m.mallocing != 0 {
    		throw("malloc deadlock")
    	}
    
    	t := itabTable
    	if t.count >= 3*(t.size/4) { // 75% load factor
    		// Grow hash table.
    		// t2 = new(itabTableType) + some additional entries
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 22.5K bytes
    - Viewed (0)
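
    itabAdd grows the itab hash table once it hits a 75% load factor, and deliberately crashes if it is entered while mallocing is set. A toy sketch of just the growth trigger, using the same 3*(size/4) expression (it only doubles a capacity counter; no rehashing):

    package main

    import "fmt"

    // table is a toy stand-in that mirrors only the growth trigger.
    type table struct {
        size  int // number of slots
        count int // slots in use
    }

    func (t *table) maybeGrow() {
        if t.count >= 3*(t.size/4) { // 75% load factor, as in itabAdd
            t.size *= 2
            fmt.Printf("grew to %d slots at count=%d\n", t.size, t.count)
        }
    }

    func main() {
        t := &table{size: 8}
        for i := 0; i < 10; i++ {
            t.count++
            t.maybeGrow()
        }
    }
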
  9. src/runtime/metrics_test.go

    	*runtime.CasGStatusAlwaysTrack = true
    
    	mu.Lock1()
    
    	// Start up a goroutine to wait on the lock.
    	gc := make(chan *runtime.G)
    	done := make(chan bool)
    	go func() {
    		gc <- runtime.Getg()
    
    		for {
    			mu.Lock2()
    			mu.Unlock2()
    			if <-done {
    				return
    			}
    		}
    	}()
    	gp := <-gc
    
    	// Set the block time high enough so that it will always show up, even
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:52:17 UTC 2024
    - 45K bytes
    - Viewed (0)
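
    This test exercises the runtime's lock-wait accounting, using runtime.Getg, a test-only export of getg, to identify the waiting goroutine. From ordinary user code the closest observable counterpart is the /sync/mutex/wait/total:seconds metric in runtime/metrics. A minimal sketch that creates some sync.Mutex contention and then reads that metric:

    package main

    import (
        "fmt"
        "runtime/metrics"
        "sync"
        "time"
    )

    func main() {
        // Create contention so the wait metric has something to report.
        var mu sync.Mutex
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for j := 0; j < 100; j++ {
                    mu.Lock()
                    time.Sleep(100 * time.Microsecond)
                    mu.Unlock()
                }
            }()
        }
        wg.Wait()

        // Total time goroutines have spent blocked on mutexes so far.
        sample := []metrics.Sample{{Name: "/sync/mutex/wait/total:seconds"}}
        metrics.Read(sample)
        if sample[0].Value.Kind() == metrics.KindFloat64 {
            fmt.Printf("total mutex wait: %.3fs\n", sample[0].Value.Float64())
        }
    }
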