Results 61 - 70 of 511 for Stack0 (0.13 sec)

  1. src/runtime/trace.go

    	// Read everything out of the last gen's CPU profile buffer.
    	traceReadCPU(gen)
    
    	// Flush CPU samples, stacks, and strings for the last generation. This is safe,
    	// because we're now certain no M is writing to the last generation.
    	//
    	// Ordering is important here. traceCPUFlush may generate new stacks and dumping
    	// stacks may generate new strings.
    	traceCPUFlush(gen)
    	trace.stackTab[gen%2].dump(gen)
    	trace.typeTab[gen%2].dump(gen)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 21:17:41 UTC 2024
    - 37.1K bytes
    - Viewed (0)
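
    A minimal, self-contained sketch of the ordering constraint described in the comments above: flushing CPU samples can create stack-table entries, and dumping stacks can create string-table entries, so each table is dumped only after everything that can feed it. The table type and all names below are invented for illustration; this is not the runtime's code.

        package main

        import "fmt"

        // table is a hypothetical interning table standing in for the
        // runtime's stack and string tables in the excerpt above.
        type table struct {
            name    string
            entries []string
        }

        func (t *table) add(e string) { t.entries = append(t.entries, e) }
        func (t *table) dump()        { fmt.Println(t.name, "->", t.entries) }

        func main() {
            stacks := &table{name: "stacks"}
            strs := &table{name: "strings"}

            // Flushing CPU samples may register new stacks, so it runs first.
            stacks.add("stack#1")

            // Dumping stacks may register new strings, so the stack table is
            // dumped before the string table.
            stacks.dump()
            strs.add("main.main")

            // The string table is dumped last, after everything that can feed it.
            strs.dump()
        }
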
  2. maven-core/src/main/java/org/apache/maven/execution/scope/internal/MojoExecutionScope.java

        public void enter() {
            LinkedList<ScopeState> stack = values.get();
            if (stack == null) {
                stack = new LinkedList<>();
                values.set(stack);
            }
            stack.addFirst(new ScopeState());
        }
    
        private ScopeState getScopeState() {
            LinkedList<ScopeState> stack = values.get();
            if (stack == null || stack.isEmpty()) {
                throw new IllegalStateException();
    Registered: Wed Jun 12 09:55:16 UTC 2024
    - Last Modified: Tue Jun 11 07:23:04 UTC 2024
    - 5.7K bytes
    - Viewed (0)
  3. src/runtime/preempt.go

    // stack.
    //
    // Synchronous safe-points are implemented by overloading the stack
    // bound check in function prologues. To preempt a goroutine at the
    // next synchronous safe-point, the runtime poisons the goroutine's
    // stack bound to a value that will cause the next stack bound check
    // to fail and enter the stack growth implementation, which will
    // detect that it was actually a preemption and redirect to preemption
    // handling.
    //
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 15:41:45 UTC 2024
    - 15.1K bytes
    - Viewed (0)
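
    The poisoned-stack-bound idea described above can be sketched outside the runtime. Everything here (the g struct, stackPreemptSentinel, prologueCheck) is an illustrative stand-in, not the actual runtime types or values.

        package main

        import "fmt"

        // stackPreemptSentinel stands in for the runtime's poison value: a
        // bound that every prologue stack check is guaranteed to fail against.
        const stackPreemptSentinel = ^uintptr(0)

        // g models just enough of a goroutine for the sketch.
        type g struct {
            sp         uintptr // current stack pointer
            stackGuard uintptr // bound checked in function prologues
        }

        // requestPreemption poisons the stack bound so the next check fails.
        func requestPreemption(gp *g) { gp.stackGuard = stackPreemptSentinel }

        // prologueCheck mimics the overloaded check: a failure normally means
        // the stack must grow, but the sentinel value means "preempt instead".
        func prologueCheck(gp *g) {
            if gp.sp < gp.stackGuard {
                if gp.stackGuard == stackPreemptSentinel {
                    fmt.Println("preemption requested: yield at this safe-point")
                    return
                }
                fmt.Println("stack bound exceeded: grow the stack")
                return
            }
            fmt.Println("check passed: keep running")
        }

        func main() {
            gp := &g{sp: 0x1000, stackGuard: 0x800}
            prologueCheck(gp)     // passes: sp is above the guard
            requestPreemption(gp) // poison the bound
            prologueCheck(gp)     // fails against the sentinel -> preemption
        }
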
  4. src/cmd/trace/goroutinegen.go

    	gs.augmentName(st.Stack)
    
    	// Handle the goroutine state transition.
    	from, to := st.Goroutine()
    	if from == to {
    		// Filter out no-op events.
    		return
    	}
    	if from.Executing() && !to.Executing() {
    		if to == trace.GoWaiting {
    			// Goroutine started blocking.
    			gs.block(ev.Time(), ev.Stack(), st.Reason, ctx)
    		} else {
    			gs.stop(ev.Time(), ev.Stack(), ctx)
    		}
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 18:48:18 UTC 2024
    - 4.6K bytes
    - Viewed (0)
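
    The transition handling in this excerpt (and in the threadgen.go and procgen.go results below) follows one pattern: drop no-op events, then special-case the edge where a goroutine leaves the executing state. A standalone sketch of that pattern, with invented state names in place of the trace package's:

        package main

        import "fmt"

        // goState is a stand-in for the trace package's goroutine states;
        // only the cases the sketch needs.
        type goState int

        const (
            goRunning goState = iota
            goRunnable
            goWaiting
        )

        func (s goState) executing() bool { return s == goRunning }

        // handleTransition mirrors the shape of the excerpt: filter out no-op
        // events, then distinguish "started blocking" from merely stopping
        // when a goroutine leaves the executing state.
        func handleTransition(from, to goState, reason string) {
            if from == to {
                return // no-op event
            }
            if from.executing() && !to.executing() {
                if to == goWaiting {
                    fmt.Println("goroutine started blocking:", reason)
                } else {
                    fmt.Println("goroutine stopped executing")
                }
            }
        }

        func main() {
            handleTransition(goRunning, goRunning, "")             // filtered out
            handleTransition(goRunning, goWaiting, "chan receive") // blocking
            handleTransition(goRunning, goRunnable, "")            // plain stop
        }
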
  5. src/runtime/debug.go

    //
    // This must be deeply nosplit because it is called from a function
    // prologue before the stack is set up and because the compiler will
    // call it from any splittable prologue (leading to infinite
    // recursion).
    //
    // Ideally it should also use very little stack because the linker
    // doesn't currently account for this in nosplit stack depth checking.
    //
    // Ensure mayMoreStackPreempt can be called for all ABIs.
    //
    //go:nosplit
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sat May 11 20:38:24 UTC 2024
    - 4.2K bytes
    - Viewed (0)
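
    The //go:nosplit directive the comments refer to tells the compiler to omit the stack-overflow check from a function's prologue, which is exactly why such functions must themselves use very little stack: nothing will grow the stack on their behalf. A minimal, hypothetical example of the directive, unrelated to mayMoreStackPreempt itself:

        package main

        import "fmt"

        // addNoSplit omits the usual stack-overflow check in its prologue
        // because of the //go:nosplit directive.
        //
        //go:nosplit
        func addNoSplit(a, b int) int {
            return a + b
        }

        func main() {
            fmt.Println(addNoSplit(2, 3))
        }
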
  6. src/cmd/trace/threadgen.go

    	gs.augmentName(st.Stack)
    
    	// Handle the goroutine state transition.
    	from, to := st.Goroutine()
    	if from == to {
    		// Filter out no-op events.
    		return
    	}
    	if from.Executing() && !to.Executing() {
    		if to == trace.GoWaiting {
    			// Goroutine started blocking.
    			gs.block(ev.Time(), ev.Stack(), st.Reason, ctx)
    		} else {
    			gs.stop(ev.Time(), ev.Stack(), ctx)
    		}
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 18:48:18 UTC 2024
    - 5.7K bytes
    - Viewed (0)
  7. src/cmd/trace/procgen.go

    	gs.augmentName(st.Stack)
    
    	// Handle the goroutine state transition.
    	from, to := st.Goroutine()
    	if from == to {
    		// Filter out no-op events.
    		return
    	}
    	if from == trace.GoRunning && !to.Executing() {
    		if to == trace.GoWaiting {
    			// Goroutine started blocking.
    			gs.block(ev.Time(), ev.Stack(), st.Reason, ctx)
    		} else {
    			gs.stop(ev.Time(), ev.Stack(), ctx)
    		}
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 18:48:18 UTC 2024
    - 6.1K bytes
    - Viewed (0)
  8. src/internal/trace/testdata/testprog/annotations.go

    	}
    
    	// Beginning of traced execution.
    	var wg sync.WaitGroup
    	ctx, task := trace.NewTask(bgctx, "task0") // EvUserTaskCreate("task0")
    	trace.StartRegion(ctx, "task0 region")
    
    	wg.Add(1)
    	go func() {
    		defer wg.Done()
    		defer task.End() // EvUserTaskEnd("task0")
    
    		trace.StartRegion(ctx, "unended region")
    
    		trace.WithRegion(ctx, "region0", func() {
    			// EvUserRegionBegin("region0", start)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 18:48:18 UTC 2024
    - 1.4K bytes
    - Viewed (0)
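
    For context, a small self-contained program using the same runtime/trace task and region APIs that the test program above exercises; the output file name and the task/region labels are arbitrary choices.

        package main

        import (
            "context"
            "os"
            "runtime/trace"
        )

        func main() {
            // Write the execution trace to a file so it can be inspected with
            // `go tool trace trace.out`.
            f, err := os.Create("trace.out")
            if err != nil {
                panic(err)
            }
            defer f.Close()

            if err := trace.Start(f); err != nil {
                panic(err)
            }
            defer trace.Stop()

            // A task groups related regions; ending the task closes the grouping.
            ctx, task := trace.NewTask(context.Background(), "demo-task")
            defer task.End()

            // An explicit region with a matching End call...
            r := trace.StartRegion(ctx, "setup")
            prepare()
            r.End()

            // ...and the callback form, which ends the region automatically.
            trace.WithRegion(ctx, "work", func() {
                doWork()
            })
        }

        func prepare() {}
        func doWork()  {}
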
  9. src/runtime/checkptr.go

    //
    //go:linkname checkptrBase
    func checkptrBase(p unsafe.Pointer) uintptr {
    	// stack
    	if gp := getg(); gp.stack.lo <= uintptr(p) && uintptr(p) < gp.stack.hi {
    		// TODO(mdempsky): Walk the stack to identify the
    		// specific stack frame or even stack object that p
    		// points into.
    		//
    		// In the mean time, use "1" as a pseudo-address to
    		// represent the stack. This is an invalid address on
    		// all platforms, so it's guaranteed to be distinct
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 3.6K bytes
    - Viewed (0)
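
    The bounds test in this excerpt is an ordinary half-open interval check against the goroutine's stack limits. A trivial standalone version, with a made-up stackBounds type in place of the runtime's internal g.stack:

        package main

        import "fmt"

        // stackBounds stands in for the goroutine stack limits the excerpt
        // reads from getg(): the stack occupies the half-open interval [lo, hi).
        type stackBounds struct {
            lo, hi uintptr
        }

        // onStack reports whether addr lies inside the stack, using the same
        // half-open comparison as the excerpt: lo <= addr && addr < hi.
        func onStack(b stackBounds, addr uintptr) bool {
            return b.lo <= addr && addr < b.hi
        }

        func main() {
            b := stackBounds{lo: 0x1000, hi: 0x9000}
            fmt.Println(onStack(b, 0x1234)) // true: inside the bounds
            fmt.Println(onStack(b, 0x9000)) // false: hi is exclusive
        }
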
  10. src/internal/profilerecord/profilerecord.go

    // records with deep stack traces.
    //
    // TODO: Consider moving this to internal/runtime, see golang.org/issue/65355.
    package profilerecord
    
    type StackRecord struct {
    	Stack []uintptr
    }
    
    type MemProfileRecord struct {
    	AllocBytes, FreeBytes     int64
    	AllocObjects, FreeObjects int64
    	Stack                     []uintptr
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue May 21 14:38:45 UTC 2024
    - 815 bytes
    - Viewed (0)
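
    internal/profilerecord is not importable from user code, but the StackRecord shape above is easy to reproduce: a record is just a slice of return PCs. A sketch of capturing and symbolizing one with runtime.Callers and runtime.CallersFrames; the stackRecord type and capture helper are local inventions.

        package main

        import (
            "fmt"
            "runtime"
        )

        // stackRecord mirrors the StackRecord shape in the excerpt: a record
        // is just a slice of return PCs.
        type stackRecord struct {
            Stack []uintptr
        }

        // capture collects the current call stack into a record. The buffer
        // size and skip count are arbitrary choices for this sketch.
        func capture() stackRecord {
            pcs := make([]uintptr, 64)
            n := runtime.Callers(2, pcs) // skip runtime.Callers and capture itself
            return stackRecord{Stack: pcs[:n]}
        }

        func main() {
            rec := capture()
            frames := runtime.CallersFrames(rec.Stack)
            for {
                frame, more := frames.Next()
                fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
                if !more {
                    break
                }
            }
        }
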