Results 31 - 40 of 47 for stw (0.04 sec)

  1. src/cmd/internal/obj/ppc64/doc.go

    	MOVWZ (R3+R5), R4		<=>	lwzx r4,r3,r5
    	MOVHZ  (R3), R4		<=>	lhz r4,0(r3)
    	MOVHU 2(R3), R4		<=>	lhau r4,2(r3)
    	MOVBZ (R3), R4		<=>	lbz r4,0(r3)
    
    	MOVD R4,(R3)		<=>	std r4,0(r3)
    	MOVW R4,(R3)		<=>	stw r4,0(r3)
    	MOVW R4,(R3+R5)		<=>	stwx r4,r3,r5
    	MOVWU R4,4(R3)		<=>	stwu r4,4(r3)
    	MOVH R4,2(R3)		<=>	sth r4,2(r3)
    	MOVBU R4,(R3)(R5)		<=>	stbux r4,r3,r5
    
    4. Compares
    
    - Last Modified: Fri Apr 21 16:47:45 UTC 2023
    - 11.3K bytes
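    The table above maps Go assembler mnemonics and operand forms to the PPC64 machine instructions they produce. As a minimal sketch only, here is how the MOVW store form could appear in a hand-written assembly file; the file, the function name, and the matching Go declaration `func storeWord(p *uint32, v uint64)` are hypothetical, but the final store is exactly the MOVW R4,(R3) <=> stw r4,0(r3) pairing listed above.

    #include "textflag.h"

    // func storeWord(p *uint32, v uint64)
    // Store the low 32 bits of v at *p.
    TEXT ·storeWord(SB), NOSPLIT, $0-16
    	MOVD	p+0(FP), R3	// destination pointer
    	MOVD	v+8(FP), R4	// value to store
    	MOVW	R4, (R3)	// <=> stw r4,0(r3)
    	RET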
  2. src/internal/trace/oldtrace.go

    }
    
    const (
    	// Block reasons
    	sForever = iota
    	sPreempted
    	sGosched
    	sSleep
    	sChanSend
    	sChanRecv
    	sNetwork
    	sSync
    	sSyncCond
    	sSelect
    	sEmpty
    	sMarkAssistWait
    
    	// STW kinds
    	sSTWUnknown
    	sSTWGCMarkTermination
    	sSTWGCSweepTermination
    	sSTWWriteHeapDump
    	sSTWGoroutineProfile
    	sSTWGoroutineProfileCleanup
    	sSTWAllGoroutinesStackTrace
    	sSTWReadMemStats
    	sSTWAllThreadsSyscall
    - Last Modified: Fri May 17 18:48:18 UTC 2024
    - 17.2K bytes
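    Because the names above sit in a single untagged iota block, the block reasons and STW kinds are numbered consecutively in declaration order. A minimal, self-contained sketch of how such constants are typically turned into readable labels; the labels and the lookup table below are illustrative, not the ones oldtrace.go actually uses.

    package main

    import "fmt"

    const (
    	sForever = iota // 0
    	sPreempted      // 1
    	sGosched        // 2
    	sSleep          // 3
    )

    // blockReasonStrings is indexed directly by the constants above.
    var blockReasonStrings = [...]string{
    	sForever:   "forever",
    	sPreempted: "preempted",
    	sGosched:   "runtime.Gosched",
    	sSleep:     "sleep",
    }

    func main() {
    	fmt.Println(sSleep, blockReasonStrings[sSleep]) // prints: 3 sleep
    }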
  3. src/runtime/mgcmark.go

    //
    // If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
    // credit to gcController.bgScanCredit every gcCreditSlack units of
    // scan work.
    //
    // gcDrain will always return if there is a pending STW or forEachP.
    //
    // Disabling write barriers is necessary to ensure that after we've
    // confirmed that we've drained gcw, that we don't accidentally end
    // up flipping that condition by immediately adding work in the form
    - Last Modified: Thu Apr 18 21:25:11 UTC 2024
    - 52.5K bytes
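    The comment above documents that gcDrain does not necessarily drain its work queue to completion: it returns as soon as a stop-the-world or forEachP is pending. A minimal sketch of that shape, using a hypothetical work channel and stop flag rather than the runtime's own machinery:

    // drain processes queued work but returns promptly once a stop has
    // been requested, so the caller never delays a pending STW.
    func drain(work <-chan func(), stopRequested func() bool) {
    	for {
    		if stopRequested() {
    			return // yield to the pending stop-the-world
    		}
    		select {
    		case job, ok := <-work:
    			if !ok {
    				return // queue closed: nothing left to drain
    			}
    			job()
    		default:
    			return // no work immediately available
    		}
    	}
    }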
  4. src/runtime/metrics.go

    // compute populates the cpuStatsAggregate with values from the runtime.
    func (a *cpuStatsAggregate) compute() {
    	a.cpuStats = work.cpuStats
    	// TODO(mknyszek): Update the CPU stats again so that we're not
    	// just relying on the STW snapshot. The issue here is that currently
    	// this will cause non-monotonicity in the "user" CPU time metric.
    	//
    	// a.cpuStats.accumulate(nanotime(), gcphase == _GCmark)
    }
    
    - Last Modified: Mon Apr 08 21:03:13 UTC 2024
    - 26K bytes
  5. src/runtime/mheap.go

    	// allspans is a slice of all mspans ever created. Each mspan
    	// appears exactly once.
    	//
    	// The memory for allspans is manually managed and can be
    	// reallocated and move as the heap grows.
    	//
    	// In general, allspans is protected by mheap_.lock, which
    	// prevents concurrent access as well as freeing the backing
    	// store. Accesses during STW might not hold the lock, but
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
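    The comment above describes a lock-or-STW discipline: ordinary accesses to allspans take mheap_.lock, while code running during a stop-the-world may read without it, since nothing else can be appending to or reallocating the slice at that point. A minimal sketch of that pattern with hypothetical types, not the runtime's own code:

    package spansketch

    import "sync"

    type span struct{ base, npages uintptr }

    type spanRegistry struct {
    	mu       sync.Mutex
    	allSpans []*span // backing array may be reallocated as it grows
    }

    // forEach visits every span. Callers running while the world is
    // stopped may skip the lock; everyone else must take it.
    func (r *spanRegistry) forEach(worldStopped bool, fn func(*span)) {
    	if !worldStopped {
    		r.mu.Lock()
    		defer r.mu.Unlock()
    	}
    	for _, s := range r.allSpans {
    		fn(s)
    	}
    }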
  6. src/internal/trace/internal/oldtrace/parser.go

    			}
    			evGC = nil
    		case EvSTWStart:
    			evp := &evSTW
    			if *evp != nil {
    				return fmt.Errorf("previous STW is not ended before a new one (time %d)", ev.Ts)
    			}
    			*evp = ev
    		case EvSTWDone:
    			evp := &evSTW
    			if *evp == nil {
    				return fmt.Errorf("bogus STW end (time %d)", ev.Ts)
    			}
    			*evp = nil
    		case EvGCSweepStart:
    			p := ps[ev.P]
    			if p.evSweep != nil {
    - Last Modified: Fri May 24 21:15:28 UTC 2024
    - 46.8K bytes
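    The parser above enforces that STW events come in matched begin/end pairs: a begin while one is still open, or an end with none open, is reported as an error. A minimal sketch of the same check with hypothetical event and state types:

    package tracesketch

    import "fmt"

    type event struct{ Ts int64 }

    // stwState tracks the currently open STW event, if any.
    type stwState struct{ open *event }

    func (s *stwState) begin(ev *event) error {
    	if s.open != nil {
    		return fmt.Errorf("previous STW is not ended before a new one (time %d)", ev.Ts)
    	}
    	s.open = ev
    	return nil
    }

    func (s *stwState) end(ev *event) error {
    	if s.open == nil {
    		return fmt.Errorf("bogus STW end (time %d)", ev.Ts)
    	}
    	s.open = nil
    	return nil
    }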
  7. src/runtime/mgcpacer.go

    	bgScanCredit atomic.Int64
    
    	// assistTime is the nanoseconds spent in mutator assists
    	// during this cycle. This is updated atomically, and must also
    	// be updated atomically even during a STW, because it is read
    	// by sysmon. Updates occur in bounded batches, since it is both
    	// written and read throughout the cycle.
    	assistTime atomic.Int64
    
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 55.4K bytes
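    The comment above explains why assistTime is an atomic.Int64: the GC writes it in bounded batches during the cycle while sysmon reads it concurrently, even during an STW, so both sides must use atomic operations. A minimal sketch of that writer/reader pairing with hypothetical names, not mgcpacer.go itself:

    package pacersketch

    import (
    	"sync/atomic"
    	"time"
    )

    var assistTime atomic.Int64 // nanoseconds of assist work this cycle

    // recordAssist is the writer: it adds one bounded batch atomically.
    func recordAssist(batch time.Duration) {
    	assistTime.Add(int64(batch))
    }

    // monitorRead is a sysmon-like reader: it may load the counter at
    // any time without taking a lock.
    func monitorRead() time.Duration {
    	return time.Duration(assistTime.Load())
    }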
  8. src/internal/trace/event.go

    		panic("Range called on non-Range event")
    	}
    	var r Range
    	switch e.base.typ {
    	case go122.EvSTWBegin, go122.EvSTWEnd:
    		// N.B. ordering.advance smuggles in the STW reason as e.base.args[0]
    		// for go122.EvSTWEnd (it's already there for Begin).
    		r.Name = "stop-the-world (" + e.table.strings.mustGet(stringID(e.base.args[0])) + ")"
    - Last Modified: Thu May 30 12:39:00 UTC 2024
    - 28.9K bytes
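    The snippet above recovers the STW reason from the event's first argument via the trace's string table and folds it into the range name. A minimal sketch of just that formatting step, with a plain map standing in for the string table (a hypothetical helper, not the trace package's API):

    // stwRangeName looks up the reason by ID and formats the range name
    // the same way the snippet above does.
    func stwRangeName(stringTable map[uint64]string, reasonID uint64) string {
    	return "stop-the-world (" + stringTable[reasonID] + ")"
    }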
  9. src/cmd/compile/internal/ppc64/ssa.go

    			// CMP Rarg0, $0
    			// BNE 2(PC)
    			// STW R0, 0(R0)
    			// NOP (so the BNE has somewhere to land)
    
    			// CMP Rarg0, $0
    			p := s.Prog(ppc64.ACMP)
    			p.From.Type = obj.TYPE_REG
    			p.From.Reg = v.Args[0].Reg()
    			p.To.Type = obj.TYPE_CONST
    			p.To.Offset = 0
    
    			// BNE 2(PC)
    			p2 := s.Prog(ppc64.ABNE)
    			p2.To.Type = obj.TYPE_BRANCH
    
    			// STW R0, 0(R0)
    - Last Modified: Wed May 22 19:59:38 UTC 2024
    - 55.4K bytes
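    The snippet above is cut off after the first two instructions of the commented four-instruction sequence. As a hedged sketch only (the actual ssa.go may differ), the remaining STW and NOP could be emitted with the same Prog-building calls, where p2 is the BNE created above; note that MOVW with a register source and memory destination is the Go assembler's spelling of stw, per the doc.go table in result 1.

    // STW R0, 0(R0): a store through R0 that faults if the branch
    // above is not taken.
    p3 := s.Prog(ppc64.AMOVW)
    p3.From.Type = obj.TYPE_REG
    p3.From.Reg = ppc64.REG_R0
    p3.To.Type = obj.TYPE_MEM
    p3.To.Reg = ppc64.REG_R0

    // NOP (so the BNE has somewhere to land)
    p4 := s.Prog(obj.ANOP)
    p2.To.SetTarget(p4)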
  10. src/runtime/os_windows.go

    		n:    uintptr(n),
    		args: args,
    	}
    	asmstdcall_trampoline(noescape(unsafe.Pointer(&libcall)))
    	return libcall.r1
    }
    
    // Calling stdcall on os stack.
    // May run during STW, so write barriers are not allowed.
    //
    //go:nowritebarrier
    //go:nosplit
    func stdcall(fn stdFunction) uintptr {
    	gp := getg()
    	mp := gp.m
    	mp.libcall.fn = uintptr(unsafe.Pointer(fn))
    	resetLibcall := false
    - Last Modified: Fri Apr 26 22:55:25 UTC 2024
    - 41.5K bytes
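    The //go:nowritebarrier directive above matters because a write barrier is compiler-inserted bookkeeping around pointer stores, and that bookkeeping cannot safely run in code that may execute during an STW. A minimal sketch of the kind of write the directive rules out, using hypothetical types in ordinary Go rather than runtime code:

    type node struct {
    	next *node // pointer field: stores here may get a write barrier
    	id   int   // scalar field: stores here never do
    }

    func link(a, b *node) {
    	a.next = b // pointer write: the compiler may insert a write barrier
    	a.id = 42  // integer write: no barrier needed
    }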