- Sort: Score
- Result: 10 results
- Languages: All
Results 11 - 20 of 41 for stw (0.12 sec)
-
src/runtime/os_linux.go
} // STW to guarantee that user goroutines see an atomic change to thread // state. Without STW, goroutines could migrate Ms while change is in // progress and e.g., see state old -> new -> old -> new. // // N.B. Internally, this function does not depend on STW to // successfully change every thread. It is only needed for user // expectations, per above. stw := stopTheWorld(stwAllThreadsSyscall)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 25.9K bytes - Viewed (0) -
src/sync/pool.go
var ( allPoolsMu Mutex // allPools is the set of pools that have non-empty primary // caches. Protected by either 1) allPoolsMu and pinning or 2) // STW. allPools []*Pool // oldPools is the set of pools that may have non-empty victim // caches. Protected by STW. oldPools []*Pool ) func init() { runtime_registerPoolCleanup(poolCleanup) } func indexLocal(l unsafe.Pointer, i int) *poolLocal {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 21:14:51 UTC 2024 - 9.4K bytes - Viewed (0) -
src/internal/trace/internal/oldtrace/parser_test.go
for i := 0; i < res.Events.Len(); i++ { ev := res.Events.Ptr(i) if ver >= 21 { if ev.Type == EvSTWStart && res.Strings[ev.Args[0]] == "unknown" { t.Errorf("found unknown STW event; update stwReasonStrings?") } } } } func TestBuckets(t *testing.T) { var evs Events const N = eventsBucketSize*3 + 123 for i := 0; i < N; i++ { evs.append(Event{Ts: Timestamp(i)})
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 3.4K bytes - Viewed (0) -
src/runtime/os3_plan9.go
// license that can be found in the LICENSE file. package runtime import ( "internal/abi" "internal/goarch" "internal/stringslite" "unsafe" ) // May run during STW, so write barriers are not allowed. // //go:nowritebarrierrec func sighandler(_ureg *ureg, note *byte, gp *g) int { gsignal := getg() mp := gsignal.m var t sigTabT var docrash bool var sig int
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 15:41:45 UTC 2024 - 4K bytes - Viewed (0) -
src/runtime/mstats.go
mcache_sys sysMemStat buckhash_sys sysMemStat // profiling bucket hash table // Statistics about GC overhead. gcMiscSys sysMemStat // updated atomically or during STW // Miscellaneous statistics. other_sys sysMemStat // updated atomically or during STW // Statistics about the garbage collector. // Protected by mheap or worldsema during GC. last_gc_unix uint64 // last gc (in unix time) pause_total_ns uint64
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 08 21:03:13 UTC 2024 - 34.2K bytes - Viewed (0) -
src/runtime/trace.go
// This is necessary to ensure the consistency of the STW events. If we're feeling // adventurous we could lift this restriction and add a STWActive event, but the // cost of maintaining this consistency is low. We're not going to hold this semaphore // for very long and most STW periods are very short. // Once we hold worldsema, prevent preemption as well so we're not interrupted partway
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 21:17:41 UTC 2024 - 37.1K bytes - Viewed (0) -
src/runtime/export_test.go
var ReadUnaligned64 = readUnaligned64 func CountPagesInUse() (pagesInUse, counted uintptr) { stw := stopTheWorld(stwForTestCountPagesInUse) pagesInUse = mheap_.pagesInUse.Load() for _, s := range mheap_.allspans { if s.state.get() == mSpanInUse { counted += s.npages } } startTheWorld(stw) return } func Fastrand() uint32 { return uint32(rand()) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:50:53 UTC 2024 - 46.1K bytes - Viewed (0) -
src/runtime/heapdump.go
package runtime import ( "internal/abi" "internal/goarch" "unsafe" ) //go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump func runtime_debug_WriteHeapDump(fd uintptr) { stw := stopTheWorld(stwWriteHeapDump) // Keep m on this G's stack instead of the system stack. // Both readmemstats_m and writeheapdump_m have pretty large // peak stack depths and we risk blowing the system stack.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 09 04:07:57 UTC 2024 - 17.6K bytes - Viewed (0) -
src/runtime/mspanset.go
// system. We never release spine memory because there could be // concurrent lock-free access and we're likely to reuse it // anyway. (In principle, we could do this during STW.) spineLock mutex spine atomicSpanSetSpinePointer // *[N]atomic.Pointer[spanSetBlock] spineLen atomic.Uintptr // Spine array length
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 13.1K bytes - Viewed (0) -
src/runtime/tracestatus.go
// writeProcStatusForP emits a ProcStatus event for the provided p based on its status. // // The caller must fully own pp and it must be prevented from transitioning (e.g. this can be // called by a forEachP callback or from a STW). func (w traceWriter) writeProcStatusForP(pp *p, inSTW bool) traceWriter { if !pp.trace.acquireStatus(w.gen) { return w } var status traceProcStatus switch pp.status { case _Pidle, _Pgcstop:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 15 17:03:35 UTC 2024 - 7.2K bytes - Viewed (0)