
Results 1 - 10 of 54 for getg (0.09 sec)

  1. src/runtime/mgcmark.go

    // This must be called with preemption enabled.
    func gcAssistAlloc(gp *g) {
    	// Don't assist in non-preemptible contexts. These are
    	// generally fragile and won't allow the assist to block.
    	if getg() == gp.m.g0 {
    		return
    	}
    	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
    		return
    	}
    
    	// This extremely verbose boolean indicates whether we've
    	// entered mark assist from the perspective of the tracer.
    	//
    - Last Modified: Thu Apr 18 21:25:11 UTC 2024
    - 52.5K bytes
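    gcAssistAlloc is the entry point for GC assists: a goroutine that allocates during a cycle is charged for mark work unless, as the guards above show, it is running on g0 or in a non-preemptible context. A minimal sketch to observe assists from the outside, assuming only the documented GODEBUG=gctrace=1 flag; the mark/scan CPU column of each trace line includes the assist time this function accounts for:

    // Run with GODEBUG=gctrace=1. Allocating ~256 MiB of live data
    // triggers several GC cycles, and allocating goroutines get
    // drafted into mark work via gcAssistAlloc.
    package main

    import "fmt"

    func main() {
        var keep [][]byte
        for i := 0; i < 1<<12; i++ {
            keep = append(keep, make([]byte, 1<<16)) // 4096 x 64 KiB
        }
        fmt.Println("slices kept:", len(keep))
    }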
  2. src/runtime/mgc.go

    			// mark using checkmark bits, to check that we
    			// didn't forget to mark anything during the
    			// concurrent mark process.
    			startCheckmarks()
    			gcResetMarkState()
    			gcw := &getg().m.p.ptr().gcw
    			gcDrain(gcw, 0)
    			wbBufFlush1(getg().m.p.ptr())
    			gcw.dispose()
    			endCheckmarks()
    		}
    
    		// marking is complete so we can turn the write barrier off
    		setGCPhase(_GCoff)
    		stwSwept = gcSweep(work.mode)
    - Last Modified: Wed May 29 16:25:21 UTC 2024
    - 62K bytes
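    The checkmark pass above is a self-check: with the world stopped, the runtime re-marks the heap using checkmark bits and verifies that concurrent marking missed nothing. It only runs when the documented GODEBUG=gccheckmark=1 flag is set; a sketch that forces a cycle to exercise it, not a test of the runtime itself:

    // Run with GODEBUG=gccheckmark=1 so each forced cycle is verified
    // by the checkmark logic shown above.
    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        runtime.GC() // force a full collection, including mark termination
        var ms runtime.MemStats
        runtime.ReadMemStats(&ms)
        fmt.Println("completed GC cycles:", ms.NumGC)
    }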
  3. src/runtime/mprof.go

    	var stw worldStop
    	if all {
    		stw = stopTheWorld(stwAllGoroutinesStack)
    	}
    
    	n := 0
    	if len(buf) > 0 {
    		gp := getg()
    		sp := getcallersp()
    		pc := getcallerpc()
    		systemstack(func() {
    			g0 := getg()
    			// Force traceback=1 to override GOTRACEBACK setting,
    			// so that Stack's results are consistent.
    			// GOTRACEBACK is only about crash dumps.
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 53.3K bytes
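    This is the implementation behind runtime.Stack: when all is set it stops the world so the dump is a consistent snapshot, and it forces traceback=1 so the output does not depend on GOTRACEBACK. The same path is reached from user code like this:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        buf := make([]byte, 64<<10) // grow the buffer if the dump is truncated
        // Passing true selects the all-goroutines path, which stops
        // the world as in the snippet above.
        n := runtime.Stack(buf, true)
        fmt.Printf("%s", buf[:n])
    }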
  4. src/runtime/traceback.go

    	}
    	call(cgoSymbolizer, noescape(unsafe.Pointer(arg)))
    }
    
    // cgoContextPCs gets the PC values from a cgo traceback.
    func cgoContextPCs(ctxt uintptr, buf []uintptr) {
    	if cgoTraceback == nil {
    		return
    	}
    	call := cgocall
    	if panicking.Load() > 0 || getg().m.curg != getg() {
    		// We do not want to call into the scheduler when panicking
    		// or when on the system stack.
    - Last Modified: Wed May 29 16:25:21 UTC 2024
    - 55.1K bytes
  5. src/runtime/mbitmap.go

    		// Make sure we keep ep alive. We may have stopped referencing
    		// ep's data pointer sometime before this point and it's possible
    		// for that memory to get freed.
    		KeepAlive(ep)
    		return
    	}
    
    	// stack
    	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
    		found := false
    		var u unwinder
    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 60K bytes
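    The KeepAlive here guards against exactly the hazard runtime.KeepAlive exists for: an object can become unreachable, and be collected or finalized, while code still uses a value derived from it. A standard user-level illustration (the resource type and its fd field are made up for the example):

    package main

    import (
        "fmt"
        "runtime"
    )

    type resource struct{ fd int } // fd stands in for any raw handle

    func use(fd int) { fmt.Println("using fd", fd) }

    func main() {
        r := &resource{fd: 3}
        runtime.SetFinalizer(r, func(r *resource) { fmt.Println("finalized fd", r.fd) })
        fd := r.fd
        // Without KeepAlive, r may be finalized as soon as fd is copied
        // out, racing with use(fd). KeepAlive pins r until this point.
        use(fd)
        runtime.KeepAlive(r)
    }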
  6. src/runtime/malloc.go

    				} else if size&1 == 0 {
    					align = 2
    				} else {
    					align = 1
    				}
    			}
    			return persistentalloc(size, align, &memstats.other_sys)
    		}
    
    		if inittrace.active && inittrace.id == getg().goid {
    			// Init functions are executed sequentially in a single goroutine.
    			inittrace.allocs += 1
    		}
    	}
    
    	// assistG is the G to charge for this allocation, or nil if
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 59.6K bytes
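    Two things are visible in this snippet: small persistent (never freed) allocations pick the largest power-of-two alignment the size already satisfies, and allocations made during package init are counted when the documented GODEBUG=inittrace=1 flag is active. The alignment choice is plain arithmetic; a sketch of just that logic, not the runtime's full policy:

    package main

    import "fmt"

    // alignFor mirrors the visible logic: the largest power of two
    // (capped at 8, as in the snippet) that divides size.
    func alignFor(size uintptr) uintptr {
        switch {
        case size&7 == 0:
            return 8
        case size&3 == 0:
            return 4
        case size&1 == 0:
            return 2
        default:
            return 1
        }
    }

    func main() {
        for _, n := range []uintptr{1, 2, 6, 8, 24, 9} {
            fmt.Printf("size %2d -> align %d\n", n, alignFor(n))
        }
    }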
  7. src/runtime/mgcscavenge.go

    	lock(&s.lock)
    	if getg() != s.g {
    		throw("tried to sleep scavenger from another goroutine")
    	}
    
    	if worked < minScavWorkTime {
    		// This means there wasn't enough work to actually fill up minScavWorkTime.
    		// That's fine; we shouldn't try to do anything with this information
    		// because it's going to result in a short enough sleep request that things
    		// will get messy. Just assume we did at least this much work.
    - Last Modified: Wed May 08 17:48:45 UTC 2024
    - 52.3K bytes
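    The throw enforces that sleep runs only on the scavenger's own goroutine, which the runtime checks by comparing getg() against the saved s.g. User code has no goroutine identity to compare, so the usual analogue is ownership by construction: only the worker's own loop calls its sleep method. A sketch of that convention (the scavenger type below is illustrative, not the runtime's):

    package main

    import (
        "fmt"
        "time"
    )

    type scavenger struct {
        wake chan struct{} // another goroutine may nudge the worker awake
    }

    // sleep is called only from run, i.e. on the worker's own goroutine;
    // that convention takes the place of the runtime's getg() != s.g throw.
    func (s *scavenger) sleep(d time.Duration) {
        select {
        case <-s.wake:
        case <-time.After(d):
        }
    }

    func (s *scavenger) run(done chan<- struct{}) {
        s.sleep(10 * time.Millisecond)
        fmt.Println("scavenger: one unit of work")
        close(done)
    }

    func main() {
        s := &scavenger{wake: make(chan struct{})}
        done := make(chan struct{})
        go s.run(done)
        <-done
    }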
  8. src/runtime/mheap.go

    type mSpanStateBox struct {
    	s atomic.Uint8
    }
    
    // It is nosplit to match get, below.
    
    //go:nosplit
    func (b *mSpanStateBox) set(s mSpanState) {
    	b.s.Store(uint8(s))
    }
    
    // It is nosplit because it's called indirectly by typedmemclr,
    // which must not be preempted.
    
    //go:nosplit
    func (b *mSpanStateBox) get() mSpanState {
    	return mSpanState(b.s.Load())
    }
    
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
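    mSpanStateBox keeps a one-byte enum behind atomic Load/Store so span state is never read torn, and marks the accessors nosplit because they run on non-preemptible paths. Outside the runtime there is no nosplit concern, and sync/atomic exports no Uint8, so a user-level analogue of the same box pattern uses atomic.Uint32:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type spanState uint8

    const (
        stateDead spanState = iota
        stateInUse
        stateManual
    )

    // stateBox is a user-level analogue of mSpanStateBox: an enum kept
    // behind atomic Store/Load. atomic.Uint32 stands in for the
    // runtime's internal atomic.Uint8.
    type stateBox struct{ s atomic.Uint32 }

    func (b *stateBox) set(s spanState) { b.s.Store(uint32(s)) }
    func (b *stateBox) get() spanState  { return spanState(b.s.Load()) }

    func main() {
        var b stateBox
        b.set(stateInUse)
        fmt.Println(b.get() == stateInUse) // true
    }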
  9. src/cmd/go/internal/modget/get.go

    	CmdGet.Run = runGet // break init loop
    	CmdGet.Flag.Var(&getD, "d", "")
    	CmdGet.Flag.Var(&getU, "u", "")
    }
    
    func runGet(ctx context.Context, cmd *base.Command, args []string) {
    	switch getU.version {
    	case "", "upgrade", "patch":
    		// ok
    	default:
    		base.Fatalf("go: unknown upgrade flag -u=%s", getU.rawVersion)
    	}
    	if getD.set {
    		if !getD.value {
    			base.Fatalf("go: -d flag may not be set to false")
    		}
    - Last Modified: Fri Jun 07 18:26:32 UTC 2024
    - 66.5K bytes
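    getU and getD are custom flag.Value implementations, which lets go get distinguish "flag absent" from "flag set" and keep the raw text for error messages, as the -u check above does with getU.rawVersion. A sketch modeled on the fields visible in the snippet (set, version, rawVersion); the real cmd/go type differs in detail:

    package main

    import (
        "flag"
        "fmt"
    )

    // upgradeFlag records whether -u was given at all and what it said.
    type upgradeFlag struct {
        set        bool
        rawVersion string
        version    string
    }

    func (f *upgradeFlag) IsBoolFlag() bool { return true } // allow a bare -u
    func (f *upgradeFlag) String() string   { return f.version }

    func (f *upgradeFlag) Set(s string) error {
        f.set = true
        f.rawVersion = s
        if s == "true" { // a bare -u parses as the boolean literal "true"
            s = "upgrade"
        }
        f.version = s
        return nil
    }

    func main() {
        var u upgradeFlag
        flag.Var(&u, "u", "upgrade strategy")
        flag.Parse()
        fmt.Printf("set=%v version=%q raw=%q\n", u.set, u.version, u.rawVersion)
    }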
  10. src/runtime/mgcpacer.go

    			c.setMaxIdleMarkWorkers(0)
    		} else {
    			// TODO(mknyszek): The fundamental reason why we need this is because
    			// we can't count on the fractional mark worker to get scheduled.
    			// Fix that by ensuring it gets scheduled according to its quota even
    			// if the rest of the application is idle.
    			c.setMaxIdleMarkWorkers(1)
    		}
    	} else {
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 55.4K bytes