Results 11 - 18 of 18 for growstack (0.14 sec)

  1. src/runtime/debug_test.go

    	g, after := startDebugCallWorker(t)
    	defer after()
    
    	// Inject a call that grows the stack. debugCallWorker checks
    	// for stack pointer breakage.
    	if _, err := runtime.InjectDebugCall(g, func() { growStack(nil) }, nil, nil, debugCallTKill, false); err != nil {
    		t.Fatal(err)
    	}
    }
    
    //go:nosplit
    func debugCallUnsafePointWorker(gpp **runtime.G, ready, stop *uint32) {
    - Last Modified: Fri Sep 08 15:08:04 UTC 2023
    - 8K bytes
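    The growStack being injected is defined elsewhere in the runtime's test files and is not part of this snippet. A minimal sketch of such a helper, with a hypothetical recursive function whose deep frames force the runtime to copy the goroutine stack to a larger segment (depth and frame size are arbitrary):

    package main
    
    // growStack is a stand-in for the helper the test injects: recursion
    // with a sizable local forces a stack copy, which is the condition
    // InjectDebugCall is exercised against above.
    //go:noinline
    func growStack(n int) {
    	if n == 0 {
    		return
    	}
    	var pad [256]byte // frame payload kept live across the recursive call
    	growStack(n - 1)
    	_ = pad
    }
    
    func main() {
    	growStack(1 << 10) // ~1000 frames comfortably exceeds the initial stack
    }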
  2. src/runtime/pprof/pprof_test.go

    	testCPUProfile(t, matches, func(duration time.Duration) {
    		t := time.After(duration)
    		c := make(chan bool)
    		for {
    			go func() {
    				growstack1()
    				c <- true
    			}()
    			select {
    			case <-t:
    				return
    			case <-c:
    			}
    		}
    	})
    }
    
    //go:noinline
    func growstack1() {
    	growstack(10)
    }
    
    - Last Modified: Thu May 23 18:42:28 UTC 2024
    - 68.8K bytes
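    Stood up outside the test harness, the same pattern needs only runtime/pprof. A self-contained sketch, assuming nothing beyond the standard library (the output path, duration, and frame sizes are arbitrary, and growstack mirrors the helper sketched under the first result):

    package main
    
    import (
    	"os"
    	"runtime/pprof"
    	"time"
    )
    
    //go:noinline
    func growstack(n int) {
    	if n == 0 {
    		return
    	}
    	var pad [1024]byte // big enough that ten frames force a stack copy
    	growstack(n - 1)
    	_ = pad
    }
    
    //go:noinline
    func growstack1() { growstack(10) }
    
    func main() {
    	f, err := os.Create("cpu.pprof")
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()
    	if err := pprof.StartCPUProfile(f); err != nil {
    		panic(err)
    	}
    	defer pprof.StopCPUProfile()
    
    	// As in the test: keep launching goroutines whose stacks must grow,
    	// so stack copying is on-CPU while the profiler samples.
    	timeout := time.After(200 * time.Millisecond)
    	done := make(chan bool)
    	for {
    		go func() {
    			growstack1()
    			done <- true
    		}()
    		select {
    		case <-timeout:
    			return
    		case <-done:
    		}
    	}
    }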
  3. src/runtime/mprof.go

    				// saveblockevent)
    				mp.profStack[0] -= 1
    			}
    			nstk += fpTracebackPCs(unsafe.Pointer(getfp()), mp.profStack[1:])
    		} else {
    			mp.profStack[1] = gp.m.curg.sched.pc
    			nstk += 1 + fpTracebackPCs(unsafe.Pointer(gp.m.curg.sched.bp), mp.profStack[2:])
    		}
    	}
    
    	saveBlockEventStack(cycles, rate, mp.profStack[:nstk], which)
    	releasem(mp)
    }
    
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 53.3K bytes
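    fpTracebackPCs walks raw frame pointers into the preallocated mp.profStack buffer; that path is internal to the runtime. The portable analogue available to ordinary code is runtime.Callers into a fixed-size PC slice, resolved lazily with CallersFrames. A minimal sketch (the buffer size is arbitrary):

    package main
    
    import (
    	"fmt"
    	"runtime"
    )
    
    func captureStack() []uintptr {
    	pcs := make([]uintptr, 32)   // cf. the preallocated profStack buffer
    	n := runtime.Callers(2, pcs) // skip Callers and captureStack itself
    	return pcs[:n]
    }
    
    func main() {
    	frames := runtime.CallersFrames(captureStack())
    	for {
    		f, more := frames.Next()
    		fmt.Printf("%s\n\t%s:%d\n", f.Function, f.File, f.Line)
    		if !more {
    			break
    		}
    	}
    }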
  4. src/internal/trace/traceviewer/emitter.go

    		Time:  viewerTime(a.Start),
    		Stack: a.FromStack,
    	})
    	e.OptionalEvent(&format.Event{
    		Name:  a.Name,
    		Phase: "t",
    		TID:   a.ToResource,
    		PID:   sectionID,
    		ID:    e.arrowSeq,
    		Time:  viewerTime(a.End),
    	})
    }
    
    type ArrowEvent struct {
    	Name         string
    	Start        time.Duration
    	End          time.Duration
    	FromResource uint64
    	FromStack    int
    	ToResource   uint64
    }
    
    - Last Modified: Tue Nov 21 21:29:58 UTC 2023
    - 20.4K bytes
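    OptionalEvent is writing the Chrome trace-viewer JSON format, in which an arrow is a pair of flow events tied together by a shared ID: phase "s" at the source and "t" at the destination, matching the two emissions above. A hedged sketch of that wire format, using a stand-in struct for the internal format.Event:

    package main
    
    import (
    	"encoding/json"
    	"fmt"
    )
    
    // event approximates the fields of traceviewer's format.Event that the
    // snippet populates; the json tags follow the trace-viewer spec.
    type event struct {
    	Name  string  `json:"name"`
    	Phase string  `json:"ph"`
    	TID   uint64  `json:"tid"`
    	PID   uint64  `json:"pid"`
    	ID    uint64  `json:"id"`
    	Time  float64 `json:"ts"` // microseconds
    }
    
    func main() {
    	arrow := []event{
    		{Name: "unblock", Phase: "s", TID: 1, ID: 7, Time: 10},
    		{Name: "unblock", Phase: "t", TID: 2, ID: 7, Time: 12},
    	}
    	b, _ := json.Marshal(arrow)
    	fmt.Println(string(b))
    }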
  5. src/cmd/trace/gstate.go

    			Name:         gs.startCause.name,
    			Start:        ctx.elapsed(gs.startCause.time),
    			End:          ctx.elapsed(ts),
    			FromResource: uint64(gs.startCause.resource),
    			ToResource:   uint64(resource),
    			FromStack:    ctx.Stack(viewerFrames(gs.startCause.stack)),
    		})
    		gs.startCause.time = 0
    		gs.startCause.name = ""
    		gs.startCause.resource = 0
    		gs.startCause.stack = trace.NoStack
    	}
    	gs.executing = resource
    - Last Modified: Fri May 17 18:48:18 UTC 2024
    - 11.9K bytes
  6. src/cmd/trace/gen.go

    			Name:         "newTask",
    			Start:        ctx.elapsed(task.Start.Time()),
    			End:          ctx.elapsed(task.Start.Time()),
    			FromResource: uint64(task.Parent.ID),
    			ToResource:   uint64(task.ID),
    			FromStack:    ctx.Stack(viewerFrames(task.Start.Stack())),
    		})
    	}
    }
    
    // emitRegion emits goroutine-based slice events to the UI. The caller
    // must be emitting for a goroutine-oriented trace.
    //
    - Last Modified: Fri May 17 18:48:18 UTC 2024
    - 11.5K bytes
  7. src/runtime/runtime2.go

    	lockedInt     uint32      // tracking for internal lockOSThread
    	nextwaitm     muintptr    // next m waiting for lock
    
    	mLockProfile mLockProfile // fields relating to runtime.lock contention
    	profStack    []uintptr    // used for memory/block/mutex stack traces
    
    	// wait* are used to carry arguments from gopark into park_m, because
    	// there's no stack to put them on. That is their sole purpose.
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 47.9K bytes
  8. src/runtime/proc.go

    // TODO(mknyszek): Implement lazy allocation if this becomes a problem.
    func mProfStackInit(mp *m) {
    	if debug.profstackdepth == 0 {
    		// debug.profstackdepth is set to 0 by the user, or we're being called from
    		// schedinit before parsedebugvars.
    		return
    	}
    	mp.profStack = makeProfStackFP()
    	mp.mLockProfile.stack = makeProfStackFP()
    }
    
    // makeProfStackFP creates a buffer large enough to hold a maximum-sized stack
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 207.5K bytes
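    The buffers mProfStackInit preallocates hold the call stacks for memory, block, and mutex profile events, with depth bounded by the profstackdepth debug setting checked above. A small sketch of the user-facing side those buffers serve, assuming only the standard runtime and runtime/pprof APIs (the sampling rates are deliberately aggressive):

    package main
    
    import (
    	"os"
    	"runtime"
    	"runtime/pprof"
    	"sync"
    )
    
    var sink int // keeps the busy loop from being optimized away
    
    func main() {
    	runtime.SetBlockProfileRate(1)     // sample every blocking event
    	runtime.SetMutexProfileFraction(1) // sample every contended mutex
    
    	var mu sync.Mutex
    	var wg sync.WaitGroup
    	for i := 0; i < 4; i++ {
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			mu.Lock()
    			for j := 0; j < 1e6; j++ { // hold the lock long enough to contend
    				sink += j
    			}
    			mu.Unlock()
    		}()
    	}
    	wg.Wait()
    
    	// The stacks recorded for these events are what the per-M profStack
    	// buffers above were sized for.
    	pprof.Lookup("mutex").WriteTo(os.Stdout, 1)
    	pprof.Lookup("block").WriteTo(os.Stdout, 1)
    }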