Results 101 - 110 of 119 for Setg (0.1 sec)
- src/runtime/arena.go
    }

    // Prevent preemption as we set up the space for a new object.
    //
    // Act like we're allocating.
    mp := acquirem()
    if mp.mallocing != 0 {
        throw("malloc deadlock")
    }
    if mp.gsignal == getg() {
        throw("malloc during signal")
    }
    mp.mallocing = 1

    var ptr unsafe.Pointer
    if !typ.Pointers() {
        // Allocate pointer-less objects from the tail end of the chunk.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:44:56 UTC 2024 - 37.9K bytes
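The acquirem/mallocing guard in this snippet is internal to the runtime and not callable from user code. As a rough, hypothetical sketch of the same re-entrancy guard pattern in ordinary Go (the allocator type and its fields are invented for illustration, not the runtime's API):

    package main

    import "fmt"

    // allocator mimics the guard above: a flag marks an allocation in
    // progress, and any nested allocation attempt is a fatal error,
    // analogous to the runtime's throw("malloc deadlock").
    type allocator struct {
        mallocing bool
    }

    func (a *allocator) alloc(size int) []byte {
        if a.mallocing {
            panic("malloc deadlock") // nested allocation detected
        }
        a.mallocing = true
        defer func() { a.mallocing = false }()
        return make([]byte, size)
    }

    func main() {
        var a allocator
        fmt.Println(len(a.alloc(64)))
    }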
- src/runtime/mgcscavenge.go
    //
    // Must be called from a regular goroutine that can allocate.
    func (s *scavengerState) init() {
        if s.g != nil {
            throw("scavenger state is already wired")
        }
        lockInit(&s.lock, lockRankScavenge)
        s.g = getg()

        s.timer = new(timer)
        f := func(s any, _ uintptr, _ int64) {
            s.(*scavengerState).wake()
        }
        s.timer.init(f, s)

        // input: fraction of CPU time actually used.
        // setpoint: ideal CPU fraction.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes
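The scavenger goroutine wired up here is internal, but its effect is observable from user code: runtime/debug.FreeOSMemory forces a GC followed by scavenging, which shows up in MemStats.HeapReleased. A minimal check:

    package main

    import (
        "fmt"
        "runtime"
        "runtime/debug"
    )

    func main() {
        // Allocate and drop a large slice so there is heap to return.
        b := make([]byte, 64<<20)
        b[0] = 1
        b = nil
        runtime.GC()

        var before, after runtime.MemStats
        runtime.ReadMemStats(&before)
        debug.FreeOSMemory() // GC plus forced scavenge
        runtime.ReadMemStats(&after)
        fmt.Printf("HeapReleased: %d -> %d bytes\n",
            before.HeapReleased, after.HeapReleased)
    }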
- src/runtime/chan.go
        }
        c.qcount++
        unlock(&c.lock)
        return true
    }

    if !block {
        unlock(&c.lock)
        return false
    }

    // Block on the channel. Some receiver will complete our operation for us.
    gp := getg()
    mysg := acquireSudog()
    mysg.releasetime = 0
    if t0 != 0 {
        mysg.releasetime = -1
    }
    // No stack splits between assigning elem and enqueuing mysg
    // on gp.waiting where copystack can find it.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:16:50 UTC 2024 - 25.9K bytes
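This is the path an ordinary blocking send takes: with no buffer space and no waiting receiver, the sender is wrapped in a sudog and parked until a receiver completes the operation. A runnable illustration:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        c := make(chan int) // unbuffered: a send blocks until a receiver arrives
        done := make(chan struct{})
        go func() {
            defer close(done)
            time.Sleep(10 * time.Millisecond)
            // The receiver completes the parked sender's operation; the
            // value is handed off directly, as the snippet's comment says.
            fmt.Println("received:", <-c)
        }()
        c <- 42 // parks this goroutine on the channel's wait queue
        <-done
        fmt.Println("send completed")
    }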
- src/runtime/mstats.go
    //
    // nosplit because a stack growth in this function could
    // lead to a stack allocation that could reenter the
    // function.
    //
    //go:nosplit
    func (m *consistentHeapStats) acquire() *heapStatsDelta {
        if pp := getg().m.p.ptr(); pp != nil {
            seq := pp.statsSeq.Add(1)
            if seq%2 == 0 {
                // Should have been incremented to odd.
                print("runtime: seq=", seq, "\n")
                throw("bad sequence number")
            }
        } else {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 08 21:03:13 UTC 2024 - 34.2K bytes
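These consistent heap statistics feed the user-facing runtime/metrics package (that correspondence is our reading of the runtime, not stated in the snippet). Reading a couple of heap metrics:

    package main

    import (
        "fmt"
        "runtime/metrics"
    )

    func main() {
        samples := []metrics.Sample{
            {Name: "/memory/classes/heap/objects:bytes"},
            {Name: "/gc/heap/allocs:bytes"},
        }
        metrics.Read(samples)
        for _, s := range samples {
            fmt.Printf("%s = %d\n", s.Name, s.Value.Uint64())
        }
    }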
- src/runtime/mheap.go
    // the only place it is used now. In the future, this requirement
    // may be relaxed if its use is necessary elsewhere.
    //
    //go:systemstack
    func (h *mheap) tryAllocMSpan() *mspan {
        pp := getg().m.p.ptr()
        // If we don't have a p or the cache is empty, we can't do
        // anything here.
        if pp == nil || pp.mspancache.len == 0 {
            return nil
        }
        // Pull off the last entry in the cache.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes
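The per-P span cache is a small stack popped from the tail, with a nil return signalling the fall-back slow path. A simplified, hypothetical stand-in for the shape of pp.mspancache (the spanCache type is invented for illustration):

    package main

    import "fmt"

    // spanCache mirrors the pattern above: a fixed-size stack owned by
    // one worker, popped from the tail, empty meaning "use the slow path".
    type spanCache struct {
        buf [16]*int
        len int
    }

    func (c *spanCache) tryGet() *int {
        if c.len == 0 {
            return nil // cache empty: caller falls back to the slow path
        }
        c.len--
        s := c.buf[c.len]
        c.buf[c.len] = nil
        return s
    }

    func main() {
        var c spanCache
        v := 7
        c.buf[0], c.len = &v, 1
        fmt.Println(*c.tryGet(), c.tryGet() == nil) // 7 true
    }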
- src/runtime/select.go
            }
            if c.qcount < c.dataqsiz {
                goto bufsend
            }
        }
    }

    if !block {
        selunlock(scases, lockorder)
        casi = -1
        goto retc
    }

    // pass 2 - enqueue on all chans
    gp = getg()
    if gp.waiting != nil {
        throw("gp.waiting != nil")
    }
    nextp = &gp.waiting
    for _, casei := range lockorder {
        casi = int(casei)
        cas = &scases[casi]
        c = cas.c
        sg := acquireSudog()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Mar 13 21:36:04 UTC 2024 - 15K bytes
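A user-level select with no immediately ready case takes exactly this pass-2 path: the goroutine is enqueued on every channel in the select and parked until one case can proceed. For example:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        a, b := make(chan string), make(chan string)
        go func() {
            time.Sleep(10 * time.Millisecond)
            b <- "from b"
        }()

        // Neither case is ready at first, so the runtime enqueues this
        // goroutine on both channels and parks it until one case fires.
        select {
        case v := <-a:
            fmt.Println(v)
        case v := <-b:
            fmt.Println(v)
        }
    }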
- src/runtime/string.go
    }

    // stringDataOnStack reports whether the string's data is
    // stored on the current goroutine's stack.
    func stringDataOnStack(s string) bool {
        ptr := uintptr(unsafe.Pointer(unsafe.StringData(s)))
        stk := getg().stack
        return stk.lo <= ptr && ptr < stk.hi
    }

    func rawstringtmp(buf *tmpBuf, l int) (s string, b []byte) {
        if buf != nil && l <= len(buf) {
            b = buf[:l]
            s = slicebytetostringtmp(&b[0], len(b))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:17:26 UTC 2024 - 13.4K bytes
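slicebytetostringtmp is behind the allocation-free []byte-to-string conversions the compiler can prove safe. One such case is a conversion used only as a map key, which testing.AllocsPerRun can confirm:

    package main

    import (
        "fmt"
        "testing"
    )

    func main() {
        m := map[string]int{"hello": 1}
        b := []byte("hello")
        // The string(b) below is only used for the lookup, so the
        // compiler can use a temporary string and skip the copy.
        allocs := testing.AllocsPerRun(100, func() {
            _ = m[string(b)]
        })
        fmt.Println("allocations per lookup:", allocs) // expect 0
    }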
- src/internal/trace/trace_test.go
    }

    // Examine the execution tracer's view of the CPU profile samples. Filter it
    // to only include samples from the single test goroutine. Use the goroutine
    // ID that was recorded in the events: that should reflect getg().m.curg,
    // same as the profiler's labels (even when the M is using its g0 stack).
    totalTraceSamples := 0
    traceSamples := 0
    traceStacks := make(map[string]int)
    r, err := trace.NewReader(bytes.NewReader(tb))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 18.5K bytes
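To reproduce what this test examines, run the CPU profiler and the execution tracer together; in recent Go versions the profiler's samples then also appear as trace events, attributed to the running goroutine. A minimal setup (the busy loop only exists so samples get taken):

    package main

    import (
        "bytes"
        "fmt"
        "runtime/pprof"
        "runtime/trace"
    )

    func main() {
        var profBuf, traceBuf bytes.Buffer
        if err := pprof.StartCPUProfile(&profBuf); err != nil {
            panic(err)
        }
        if err := trace.Start(&traceBuf); err != nil {
            panic(err)
        }
        sum := 0
        for i := 0; i < 50_000_000; i++ {
            sum += i
        }
        trace.Stop()
        pprof.StopCPUProfile()
        fmt.Println("sum:", sum, "trace bytes:", traceBuf.Len())
    }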
- src/runtime/debuglog.go
    }

    // Reserve space for framing header.
    l.w.ensure(debugLogHeaderSize)
    l.w.write += debugLogHeaderSize

    // Write record header.
    l.w.uvarint(tick - l.w.tick)
    l.w.uvarint(nano - l.w.nano)
    gp := getg()
    if gp != nil && gp.m != nil && gp.m.p != 0 {
        l.w.varint(int64(gp.m.p.ptr().id))
    } else {
        l.w.varint(-1)
    }

        return l
    }

    // A dlogger writes to the debug log.
    //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 02 15:10:48 UTC 2024 - 18.3K bytes
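The record header is framed as varint-encoded deltas (tick, nanotime) followed by the P id, or -1 when there is no P. A sketch of the same framing using encoding/binary (the delta values here are made up):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        buf := make([]byte, 0, 16)
        tmp := make([]byte, binary.MaxVarintLen64)

        putUvarint := func(v uint64) {
            n := binary.PutUvarint(tmp, v)
            buf = append(buf, tmp[:n]...)
        }
        putUvarint(12)   // tick delta
        putUvarint(3456) // nanotime delta

        n := binary.PutVarint(tmp, -1) // no P: signed -1
        buf = append(buf, tmp[:n]...)

        fmt.Printf("framed record header: % x\n", buf)
    }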
- src/runtime/heapdump.go
        dumpgs()
        dumpms()
        dumproots()
        dumpmemstats(m)
        dumpmemprof()
        dumpint(tagEOF)
        flush()
    }

    func writeheapdump_m(fd uintptr, m *MemStats) {
        assertWorldStopped()

        gp := getg()
        casGToWaiting(gp.m.curg, _Grunning, waitReasonDumpingHeap)

        // Set dump file.
        dumpfd = fd
        // Call dump routine.
        mdump(m)
        // Reset dump file.
        dumpfd = 0
        if tmpbuf != nil {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 09 04:07:57 UTC 2024 - 17.6K bytes
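writeheapdump_m is reached from user code through runtime/debug.WriteHeapDump, which (as assertWorldStopped above suggests) runs with the world stopped and writes the dump to the given file descriptor:

    package main

    import (
        "os"
        "runtime/debug"
    )

    func main() {
        f, err := os.Create("heap.dump")
        if err != nil {
            panic(err)
        }
        defer f.Close()
        debug.WriteHeapDump(f.Fd()) // world is stopped while dumping
    }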