Results 1 - 9 of 9 for sched (0.07 sec)

  1. src/runtime/asm_amd64.s

    	LEAQ	fn+0(FP), AX
    	SUBQ	SP, AX
    
    	// Restore g->sched (== m->curg->sched) from saved values.
    	get_tls(CX)
    	MOVQ	g(CX), SI
    	MOVQ	SP, DI
    	ADDQ	AX, DI
    	MOVQ	-8(DI), BX
    	MOVQ	BX, (g_sched+gobuf_pc)(SI)
    	MOVQ	DI, (g_sched+gobuf_sp)(SI)
    
    	// Switch back to m->g0's stack and restore m->g0->sched.sp.
    	// (Unlike m->curg, the g0 goroutine never uses sched.pc,
    	// so we do not have to restore it.)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sat May 11 20:38:24 UTC 2024
    - 60.4K bytes
    - Viewed (0)
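
    The snippet above restores the goroutine's saved scheduling context: the return PC is stored at g_sched+gobuf_pc and the adjusted stack pointer at g_sched+gobuf_sp. As orientation, a minimal plain-Go sketch of that saved context follows; the field names mirror the runtime's gobuf from the public sources, but the type here is illustrative only and the values in main are hypothetical.

    // Illustrative sketch: a plain-Go mirror of the runtime's per-goroutine
    // saved scheduling context (gobuf), which the assembly above fills in
    // through the g_sched+gobuf_pc and g_sched+gobuf_sp offsets. The real
    // type is internal to package runtime; this copy only shows the shape.
    package main

    import "fmt"

    type gobuf struct {
    	sp   uintptr // saved stack pointer (gobuf_sp)
    	pc   uintptr // saved program counter (gobuf_pc)
    	g    uintptr // goroutine owning this buffer
    	ctxt uintptr // closure context, if any
    	ret  uintptr // return value slot
    	lr   uintptr // saved link register (unused on amd64)
    	bp   uintptr // saved frame pointer, used by frame-pointer tracebacks
    }

    func main() {
    	// The two MOVQ stores above amount to assignments like these on the
    	// current goroutine's sched buffer (values here are hypothetical).
    	var sched gobuf
    	sched.pc = 0x45d2a0
    	sched.sp = 0xc000047f00
    	fmt.Printf("goroutine resumes at pc=%#x with sp=%#x\n", sched.pc, sched.sp)
    }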
  2. src/runtime/mgcmark.go

    // cached stacks around isn't a problem.
    func markrootFreeGStacks() {
    	// Take list of dead Gs with stacks.
    	lock(&sched.gFree.lock)
    	list := sched.gFree.stack
    	sched.gFree.stack = gList{}
    	unlock(&sched.gFree.lock)
    	if list.empty() {
    		return
    	}
    
    	// Free stacks.
    	q := gQueue{list.head, list.head}
    	for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Apr 18 21:25:11 UTC 2024
    - 52.5K bytes
    - Viewed (0)
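
    markrootFreeGStacks steals the entire sched.gFree.stack list while holding its lock, leaves an empty gList behind, and frees the stacks only after unlocking. The sketch below restates that drain-under-lock pattern with ordinary types; workQueue and drain are hypothetical names, not runtime APIs.

    // Illustrative sketch of the pattern in markrootFreeGStacks: hold the
    // lock only long enough to swap the shared list out, then do the
    // expensive per-item work (stack freeing, in the runtime) unlocked.
    package main

    import (
    	"fmt"
    	"sync"
    )

    type workQueue struct {
    	mu    sync.Mutex
    	items []string
    }

    // drain atomically takes everything queued so far and leaves the queue
    // empty, like `list := sched.gFree.stack; sched.gFree.stack = gList{}`.
    func (q *workQueue) drain() []string {
    	q.mu.Lock()
    	items := q.items
    	q.items = nil
    	q.mu.Unlock()
    	return items
    }

    func main() {
    	q := &workQueue{items: []string{"g1-stack", "g2-stack"}}
    	for _, it := range q.drain() {
    		fmt.Println("freeing", it) // done outside the critical section
    	}
    }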
  3. src/runtime/mgc.go

    	lock(&sched.sudoglock)
    	var sg, sgnext *sudog
    	for sg = sched.sudogcache; sg != nil; sg = sgnext {
    		sgnext = sg.next
    		sg.next = nil
    	}
    	sched.sudogcache = nil
    	unlock(&sched.sudoglock)
    
    	// Clear central defer pool.
    	// Leave per-P pools alone, they have strictly bounded size.
    	lock(&sched.deferlock)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 16:25:21 UTC 2024
    - 62K bytes
    - Viewed (0)
  4. src/runtime/traceback.go

    				// gogo's to curg.sched. Match that.
    				// This keeps morestack() from showing up in the backtrace,
    				// but that makes some sense since it'll never be returned
    				// to.
    				gp = gp.m.curg
    				u.g.set(gp)
    				frame.pc = gp.sched.pc
    				frame.fn = findfunc(frame.pc)
    				f = frame.fn
    				flag = f.flag
    				frame.lr = gp.sched.lr
    				frame.sp = gp.sched.sp
    				u.cgoCtxt = len(gp.cgoCtxt) - 1
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 16:25:21 UTC 2024
    - 55.1K bytes
    - Viewed (0)
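
    When a traceback begins on the system stack, the code above re-seeds the first frame from the user goroutine's saved context (gp.sched.pc, .sp and .lr), since that is where curg will resume. The sketch below only restates that seeding step; savedCtx, frame and seedFromSaved are hypothetical names, not the runtime's unwinder API.

    // Illustrative sketch: the first frame of an unwind can be built from a
    // goroutine's saved pc/sp/lr rather than live registers, mirroring the
    // frame.pc/sp/lr assignments from gp.sched in the snippet above.
    package main

    import "fmt"

    type savedCtx struct{ pc, sp, lr uintptr } // stand-in for g.sched

    type frame struct{ pc, sp, lr uintptr } // stand-in for the unwinder's frame

    func seedFromSaved(ctx savedCtx) frame {
    	return frame{pc: ctx.pc, sp: ctx.sp, lr: ctx.lr}
    }

    func main() {
    	f := seedFromSaved(savedCtx{pc: 0x45d2a0, sp: 0xc000050000})
    	fmt.Printf("unwind starts at pc=%#x sp=%#x\n", f.pc, f.sp)
    }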
  5. src/runtime/mprof.go

    				// saveblockevent)
    				mp.profStack[0] -= 1
    			}
    			nstk += fpTracebackPCs(unsafe.Pointer(getfp()), mp.profStack[1:])
    		} else {
    			mp.profStack[1] = gp.m.curg.sched.pc
    			nstk += 1 + fpTracebackPCs(unsafe.Pointer(gp.m.curg.sched.bp), mp.profStack[2:])
    		}
    	}
    
    	saveBlockEventStack(cycles, rate, mp.profStack[:nstk], which)
    	releasem(mp)
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 53.3K bytes
    - Viewed (0)
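
    In the else branch above, the profiler starts a frame-pointer walk from the goroutine's saved frame pointer (curg.sched.bp) rather than a live one. The sketch below shows the general shape of such a walk; stackFrame and walkFrames are hypothetical stand-ins for the raw stack words that the real fpTracebackPCs reads with unsafe loads.

    // Illustrative sketch of a frame-pointer walk: each frame records its
    // caller's frame pointer and a return PC, and the walk follows that
    // chain collecting PCs, as fpTracebackPCs does over real stack memory.
    package main

    import "fmt"

    type stackFrame struct {
    	caller *stackFrame // saved frame pointer: link to the caller's frame
    	retPC  uintptr     // return address stored alongside it
    }

    // walkFrames collects return PCs by chasing saved frame pointers.
    func walkFrames(fp *stackFrame, pcs []uintptr) int {
    	n := 0
    	for fp != nil && n < len(pcs) {
    		pcs[n] = fp.retPC
    		n++
    		fp = fp.caller
    	}
    	return n
    }

    func main() {
    	// Three nested calls: leaf -> mid -> main (addresses hypothetical).
    	mainFrame := &stackFrame{retPC: 0x401000}
    	midFrame := &stackFrame{caller: mainFrame, retPC: 0x402000}
    	leafFrame := &stackFrame{caller: midFrame, retPC: 0x403000}

    	pcs := make([]uintptr, 8)
    	n := walkFrames(leafFrame, pcs)
    	fmt.Printf("captured %d pcs: %#x\n", n, pcs[:n])
    }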
  6. src/runtime/mgcpacer.go

    //go:nowritebarrier
    func (c *gcControllerState) enlistWorker() {
    	// If there are idle Ps, wake one so it will run an idle worker.
    	// NOTE: This is suspected of causing deadlocks. See golang.org/issue/19112.
    	//
    	//	if sched.npidle.Load() != 0 && sched.nmspinning.Load() == 0 {
    	//		wakep()
    	//		return
    	//	}
    
    	// There are no idle Ps. If we need more dedicated workers,
    	// try to preempt a running P so it will switch to a worker.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 55.4K bytes
    - Viewed (0)
  7. src/runtime/mbitmap.go

    		KeepAlive(ep)
    		return
    	}
    
    	// stack
    	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
    		found := false
    		var u unwinder
    		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
    			if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
    				found = true
    				break
    			}
    		}
    		if found {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 60K bytes
    - Viewed (0)
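
    The test above is two half-open interval checks: first whether the pointer lies inside the whole goroutine stack (stack.lo <= p < stack.hi), then, per unwound frame, whether it lies in [frame.sp, frame.varp). A minimal sketch of those checks follows; span and contains are hypothetical helpers, not runtime names.

    // Illustrative sketch of the containment tests in the snippet above: a
    // coarse check against the whole stack's [lo, hi) bounds, then a
    // per-frame check against [sp, varp), both half-open intervals.
    package main

    import "fmt"

    // span is a half-open address interval [lo, hi).
    type span struct{ lo, hi uintptr }

    func (s span) contains(p uintptr) bool { return s.lo <= p && p < s.hi }

    func main() {
    	stack := span{lo: 0xc000040000, hi: 0xc000048000} // whole goroutine stack
    	frames := []span{ // per-frame [sp, varp) ranges from an unwind (hypothetical)
    		{0xc000047f00, 0xc000047f80},
    		{0xc000047e00, 0xc000047f00},
    	}

    	p := uintptr(0xc000047e40)
    	if stack.contains(p) { // cheap whole-stack test first, as in the runtime
    		for i, f := range frames {
    			if f.contains(p) {
    				fmt.Printf("pointer %#x belongs to frame %d\n", p, i)
    				break
    			}
    		}
    	}
    }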
  8. src/internal/trace/order.go

    			if ms.p == pid {
    				curCtx.M = mid
    				curCtx.P = pid
    				curCtx.G = ms.g
    				found = true
    			}
    		}
    		if !found {
    			return curCtx, false, fmt.Errorf("failed to find sched context for proc %d that's about to be stolen", pid)
    		}
    	}
    	o.queue.push(Event{table: evt, ctx: curCtx, base: *ev})
    	return newCtx, true, nil
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Jun 03 14:56:25 UTC 2024
    - 52.4K bytes
    - Viewed (0)
  9. cmd/iam-store.go

    	// reject such operation (updates to the service account are handled in
    	// a different API).
    	if su, found := cache.iamUsersMap[accessKey]; found {
    		scred := su.Credentials
    		if scred.ParentUser != parentUser {
    			return updatedAt, fmt.Errorf("%w: the service account access key is taken by another user", errIAMServiceAccountNotAllowed)
    		}
    Registered: Sun Jun 16 00:44:34 UTC 2024
    - Last Modified: Thu Jun 13 22:26:38 UTC 2024
    - 75.8K bytes
    - Viewed (0)