Results 11 - 20 of 25 for stw (0.07 sec)

  1. src/runtime/mspanset.go

    	// system. We never release spine memory because there could be
    	// concurrent lock-free access and we're likely to reuse it
    	// anyway. (In principle, we could do this during STW.)
    
    	spineLock mutex
    	spine     atomicSpanSetSpinePointer // *[N]atomic.Pointer[spanSetBlock]
    	spineLen  atomic.Uintptr            // Spine array length
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 13.1K bytes
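
    The comment explains why the spine is never freed: readers traverse it lock-free, so any block ever published must stay reachable. A minimal sketch of that publish-then-read pattern using the public sync/atomic API; the spine type below is illustrative, not the runtime's:

    	package main

    	import (
    		"fmt"
    		"sync"
    		"sync/atomic"
    	)

    	type block struct{ vals [64]int }

    	// spine mimics the runtime structure's shape: a mutex for writers
    	// (spineLock) and atomic pointers for lock-free readers (spine).
    	type spine struct {
    		mu     sync.Mutex
    		blocks [16]atomic.Pointer[block]
    	}

    	// get is lock-free: blocks are published atomically and never freed,
    	// so a reader can never observe a dangling pointer.
    	func (s *spine) get(i int) *block { return s.blocks[i].Load() }

    	// grow installs a block under the lock so writers do not race.
    	func (s *spine) grow(i int) *block {
    		s.mu.Lock()
    		defer s.mu.Unlock()
    		if b := s.blocks[i].Load(); b != nil {
    			return b
    		}
    		b := new(block)
    		s.blocks[i].Store(b)
    		return b
    	}

    	func main() {
    		var s spine
    		s.grow(0).vals[0] = 42
    		fmt.Println(s.get(0).vals[0]) // 42
    	}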
  2. src/runtime/metrics_test.go

    	}
    	t.Errorf(`time.Sleep did not contribute enough to "idle" class: minimum idle time = %.5fs`, minIdleCPUSeconds)
    }
    
    	// Call fn and verify that the correct STW metrics increment. If isGC is true,
    	// fn triggers a GC STW. Otherwise, fn triggers a non-GC ("other") STW.
    func testSchedPauseMetrics(t *testing.T, fn func(t *testing.T), isGC bool) {
    	m := []metrics.Sample{
    		{Name: "/sched/pauses/stopping/gc:seconds"},
    - Last Modified: Thu May 30 17:52:17 UTC 2024
    - 45K bytes
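
    The same STW pause histograms the test samples are available to ordinary programs through runtime/metrics. A small sketch, assuming Go 1.22+ where the /sched/pauses metrics exist:

    	package main

    	import (
    		"fmt"
    		"runtime"
    		"runtime/metrics"
    	)

    	func main() {
    		samples := []metrics.Sample{
    			{Name: "/sched/pauses/stopping/gc:seconds"},
    			{Name: "/sched/pauses/total/gc:seconds"},
    		}
    		runtime.GC() // force a GC so the STW histograms record a pause
    		metrics.Read(samples)
    		for _, s := range samples {
    			h := s.Value.Float64Histogram()
    			var n uint64
    			for _, c := range h.Counts {
    				n += c
    			}
    			fmt.Printf("%s: %d pauses recorded\n", s.Name, n)
    		}
    	}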
  3. src/runtime/traceruntime.go

    	trace.seqGC++
    }
    
    	// STWStart traces an STWBegin event.
    func (tl traceLocker) STWStart(reason stwReason) {
    	// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
    	// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 25.7K bytes
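
    STWStart is runtime-internal, but the STW events it emits appear in ordinary execution traces. A sketch capturing one with the public runtime/trace API:

    	package main

    	import (
    		"log"
    		"os"
    		"runtime"
    		"runtime/trace"
    	)

    	func main() {
    		f, err := os.Create("stw.trace")
    		if err != nil {
    			log.Fatal(err)
    		}
    		defer f.Close()
    		if err := trace.Start(f); err != nil {
    			log.Fatal(err)
    		}
    		runtime.GC() // its stop-the-world phases become STW events in the trace
    		trace.Stop()
    		// Inspect with: go tool trace stw.trace
    	}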
  4. src/runtime/HACKING.md

      the locked region, reads do not need to be atomic, but the write
      does. Outside the locked region, reads need to be atomic.
    
    * Reads that only happen during STW, where no writes can happen during
      STW, do not need to be atomic.
    
    That said, the advice from the Go memory model stands: "Don't be
    [too] clever." The performance of the runtime matters, but its
    robustness matters more.
    
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 13.9K bytes
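
    User code has no STW, but the underlying rule generalizes: once a synchronization barrier guarantees no concurrent writers, plain (non-atomic) reads are safe. A sketch using sync.WaitGroup.Wait as the quiescent point:

    	package main

    	import (
    		"fmt"
    		"sync"
    	)

    	func main() {
    		results := make([]int, 4)
    		var wg sync.WaitGroup
    		for i := range results {
    			wg.Add(1)
    			go func(i int) {
    				defer wg.Done()
    				results[i] = i * i // concurrent writes, each to its own slot
    			}(i)
    		}
    		wg.Wait() // barrier: no writes can happen past this point
    		sum := 0
    		for _, v := range results {
    			sum += v // plain reads are safe once writers are quiescent
    		}
    		fmt.Println(sum) // 14
    	}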
  5. src/runtime/extern.go

    		# MB globals scannable global size, or /gc/scan/globals:bytes
    		# P          number of processors used, or /sched/gomaxprocs:threads
    	The phases are stop-the-world (STW) sweep termination, concurrent
    	mark and scan, and STW mark termination. The CPU times
    	for mark/scan are broken down into assist time (GC performed in
    	line with allocation), background GC time, and idle GC time.
    - Last Modified: Thu May 30 17:52:17 UTC 2024
    - 18.9K bytes
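
    The phase breakdown described here is what GODEBUG=gctrace=1 prints per cycle. A sketch of a program to run under that setting; the allocation sizes are arbitrary:

    	// Run with: GODEBUG=gctrace=1 go run main.go
    	// Each "gc N ..." line reports the STW sweep termination, concurrent
    	// mark/scan, and STW mark termination wall-clock and CPU times.
    	package main

    	import "runtime"

    	var sink [][]byte

    	func main() {
    		for i := 0; i < 50; i++ {
    			sink = append(sink, make([]byte, 1<<20)) // allocate to trigger GC cycles
    		}
    		runtime.GC() // force one final, fully traced cycle
    	}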
  6. src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/gnu.go

    	case LWA, LWAX, LWAUX:
    		return true
    	case LD, LDU, LDX, LDUX:
    		return true
    	case LQ:
    		return true
    	case STB, STBU, STBX, STBUX:
    		return true
    	case STH, STHU, STHX, STHUX:
    		return true
    	case STW, STWU, STWX, STWUX:
    		return true
    	case STD, STDU, STDX, STDUX:
    		return true
    	case STQ:
    		return true
    	case LHBRX, LWBRX, STHBRX, STWBRX:
    		return true
    	case LBARX, LWARX, LHARX, LDARX:
    		return true
    - Last Modified: Thu Oct 19 23:33:33 UTC 2023
    - 12.2K bytes
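
    This switch classifies PPC64 load/store opcodes for the GNU-syntax printer. A sketch of the package's public Decode and GNUSyntax entry points; the word 0x90830000 encodes stw r4,0(r3) (primary opcode 36, RS=4, RA=3, D=0), and the pc argument to GNUSyntax is assumed from recent x/arch versions:

    	package main

    	import (
    		"encoding/binary"
    		"fmt"

    		"golang.org/x/arch/ppc64/ppc64asm"
    	)

    	func main() {
    		word := []byte{0x90, 0x83, 0x00, 0x00} // stw r4,0(r3), big-endian
    		inst, err := ppc64asm.Decode(word, binary.BigEndian)
    		if err != nil {
    			panic(err)
    		}
    		fmt.Println(ppc64asm.GNUSyntax(inst, 0)) // prints: stw r4,0(r3)
    	}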
  7. src/cmd/internal/obj/ppc64/doc.go

    	MOVWZ (R3+R5), R4		<=>	lwzx r4,r3,r5
    	MOVHZ  (R3), R4		<=>	lhz r4,0(r3)
    	MOVHU 2(R3), R4		<=>	lhau r4,2(r3)
    	MOVBZ (R3), R4		<=>	lbz r4,0(r3)
    
    	MOVD R4,(R3)		<=>	std r4,0(r3)
    	MOVW R4,(R3)		<=>	stw r4,0(r3)
    	MOVW R4,(R3+R5)		<=>	stwx r4,r3,r5
    	MOVWU R4,4(R3)		<=>	stwu r4,4(r3)
    	MOVH R4,2(R3)		<=>	sth r4,2(r3)
    	MOVBU R4,(R3)(R5)		<=>	stbux r4,r3,r5
    
    4. Compares
    
    - Last Modified: Fri Apr 21 16:47:45 UTC 2023
    - 11.3K bytes
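
    One way to watch these mappings come out of the toolchain: a 32-bit store in Go shows up as MOVW in the compiler's assembly listing, which the assembler turns into stw. A sketch (build command in the comment; the file name is arbitrary):

    	// Build with: GOARCH=ppc64le go build -gcflags=-S store.go
    	// and look for a "MOVW R.., (R..)"-style store in the listing for store32.
    	package main

    	func store32(p *int32, v int32) {
    		*p = v // a 32-bit store: MOVW in Go asm, stw in machine code
    	}

    	func main() {
    		var x int32
    		store32(&x, 7)
    	}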
  8. src/internal/trace/oldtrace.go

    }
    
    const (
    	// Block reasons
    	sForever = iota
    	sPreempted
    	sGosched
    	sSleep
    	sChanSend
    	sChanRecv
    	sNetwork
    	sSync
    	sSyncCond
    	sSelect
    	sEmpty
    	sMarkAssistWait
    
    	// STW kinds
    	sSTWUnknown
    	sSTWGCMarkTermination
    	sSTWGCSweepTermination
    	sSTWWriteHeapDump
    	sSTWGoroutineProfile
    	sSTWGoroutineProfileCleanup
    	sSTWAllGoroutinesStackTrace
    	sSTWReadMemStats
    	sSTWAllThreadsSyscall
    - Last Modified: Fri May 17 18:48:18 UTC 2024
    - 17.2K bytes
  9. src/runtime/metrics.go

    // compute populates the cpuStatsAggregate with values from the runtime.
    func (a *cpuStatsAggregate) compute() {
    	a.cpuStats = work.cpuStats
    	// TODO(mknyszek): Update the CPU stats again so that we're not
    	// just relying on the STW snapshot. The issue here is that currently
    	// this will cause non-monotonicity in the "user" CPU time metric.
    	//
    	// a.cpuStats.accumulate(nanotime(), gcphase == _GCmark)
    }
    
    - Last Modified: Mon Apr 08 21:03:13 UTC 2024
    - 26K bytes
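
    The aggregate computed here surfaces through runtime/metrics as the /cpu/classes hierarchy, which, as the TODO notes, is refreshed from STW snapshots. A sketch of reading it, assuming Go 1.20+ where these metric names exist:

    	package main

    	import (
    		"fmt"
    		"runtime"
    		"runtime/metrics"
    	)

    	func main() {
    		samples := []metrics.Sample{
    			{Name: "/cpu/classes/gc/total:cpu-seconds"},
    			{Name: "/cpu/classes/user:cpu-seconds"},
    		}
    		runtime.GC() // the CPU stats are refreshed at GC STW snapshots
    		metrics.Read(samples)
    		for _, s := range samples {
    			fmt.Printf("%s = %.6f\n", s.Name, s.Value.Float64())
    		}
    	}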
  10. src/internal/trace/internal/oldtrace/parser.go

    			}
    			evGC = nil
    		case EvSTWStart:
    			evp := &evSTW
    			if *evp != nil {
    				return fmt.Errorf("previous STW is not ended before a new one (time %d)", ev.Ts)
    			}
    			*evp = ev
    		case EvSTWDone:
    			evp := &evSTW
    			if *evp == nil {
    				return fmt.Errorf("bogus STW end (time %d)", ev.Ts)
    			}
    			*evp = nil
    		case EvGCSweepStart:
    			p := ps[ev.P]
    			if p.evSweep != nil {
    - Last Modified: Fri May 24 21:15:28 UTC 2024
    - 46.8K bytes
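
    The begin/end pairing the parser enforces is easy to sketch in isolation. A simplified checker with hypothetical event and kind names, mirroring the two error cases above:

    	package main

    	import "fmt"

    	// event is a stand-in for the parser's trace events (hypothetical type).
    	type event struct {
    		ts   int64
    		kind string // "STWStart" or "STWDone"
    	}

    	// checkSTW enforces the parser's invariant: STW ranges must strictly
    	// alternate begin/end, with no nesting and no stray ends.
    	func checkSTW(evs []event) error {
    		open := false
    		for _, ev := range evs {
    			switch ev.kind {
    			case "STWStart":
    				if open {
    					return fmt.Errorf("previous STW is not ended before a new one (time %d)", ev.ts)
    				}
    				open = true
    			case "STWDone":
    				if !open {
    					return fmt.Errorf("bogus STW end (time %d)", ev.ts)
    				}
    				open = false
    			}
    		}
    		return nil
    	}

    	func main() {
    		evs := []event{{1, "STWStart"}, {2, "STWDone"}, {3, "STWDone"}}
    		fmt.Println(checkSTW(evs)) // bogus STW end (time 3)
    	}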