Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 6 of 6 for Preemptable (0.15 sec)

  1. src/runtime/mgcmark.go

    		// would be a performance hit.
    		// Instead we recheck it here on the non-preemptible system
    		// stack to determine if we should perform an assist.
    
    		// GC is done, so ignore any remaining debt.
    		gp.gcAssistBytes = 0
    		return
    	}
    	// Track time spent in this assist. Since we're on the
    	// system stack, this is non-preemptible, so we can
    	// just measure start and end time.
    	//
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Apr 18 21:25:11 UTC 2024
    - 52.5K bytes
    - Viewed (0)
  2. src/runtime/mgc.go

    func gcStart(trigger gcTrigger) {
    	// Since this is called from malloc and malloc is called in
    	// the guts of a number of libraries that might be holding
    	// locks, don't attempt to start GC in non-preemptible or
    	// potentially unstable situations.
    	mp := acquirem()
    	if gp := getg(); gp == mp.g0 || mp.locks > 1 || mp.preemptoff != "" {
    		releasem(mp)
    		return
    	}
    	releasem(mp)
    	mp = nil
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 16:25:21 UTC 2024
    - 62K bytes
    - Viewed (0)
  3. src/runtime/malloc.go

    // weight allocation. If it is a heavy weight allocation the caller must
    // determine whether a new GC cycle needs to be started or if the GC is active
    // whether this goroutine needs to assist the GC.
    //
    // Must run in a non-preemptible context since otherwise the owner of
    // c could change.
    func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
    	s = c.alloc[spc]
    	shouldhelpgc = false
    	freeIndex := s.nextFreeIndex()
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 59.6K bytes
    - Viewed (0)
  4. src/runtime/mheap.go

    	} else {
    		return uint(i) & (1<<arenaL2Bits - 1)
    	}
    }
    
    // inheap reports whether b is a pointer into a (potentially dead) heap object.
    // It returns false for pointers into mSpanManual spans.
    // Non-preemptible because it is used by write barriers.
    //
    //go:nowritebarrier
    //go:nosplit
    func inheap(b uintptr) bool {
    	return spanOfHeap(b) != nil
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
    - Viewed (0)
  5. src/internal/trace/order.go

    	// before we get here (after the transfer out) but that's OK: that new
    	// P won't be in the ProcSyscall state anymore.
    	//
    	// Basically: while we have a preemptible P, don't advance, because we
    	// *know* from the event that we're going to lose it at some point during
    	// the syscall. We shouldn't advance until that happens.
    	if curCtx.P != NoProc {
    		pState, ok := o.pStates[curCtx.P]
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Jun 03 14:56:25 UTC 2024
    - 52.4K bytes
    - Viewed (0)
  6. src/cmd/internal/obj/riscv/obj.go

    	} else {
    		p.To.Sym = ctxt.Lookup("runtime.morestack")
    	}
    	if to_more != nil {
    		to_more.To.SetTarget(p)
    	}
    	jalToSym(ctxt, p, REG_X5)
    
    	// The instructions which unspill regs should be preemptible.
    	p = ctxt.EndUnsafePoint(p, newprog, -1)
    	p = cursym.Func().UnspillRegisterArgs(p, newprog)
    
    	// JMP start
    	p = obj.Appendp(p, newprog)
    	p.As = AJAL
    	p.To = obj.Addr{Type: obj.TYPE_BRANCH}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sun Apr 07 03:32:27 UTC 2024
    - 77K bytes
    - Viewed (0)
Back to top