- Sort Score
- Results per page: 10
- Languages All
Results 21 - 30 of 41 for getc (0.28 sec)
-
src/runtime/traceback.go
} call(cgoSymbolizer, noescape(unsafe.Pointer(arg))) } // cgoContextPCs gets the PC values from a cgo traceback. func cgoContextPCs(ctxt uintptr, buf []uintptr) { if cgoTraceback == nil { return } call := cgocall if panicking.Load() > 0 || getg().m.curg != getg() { // We do not want to call into the scheduler when panicking // or when on the system stack.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 55.1K bytes - Viewed (0) -
src/runtime/runtime1.go
} return res } // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block. //go:nosplit func acquirem() *m { gp := getg() gp.m.locks++ return gp.m } //go:nosplit func releasem(mp *m) { gp := getg() mp.locks-- if mp.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack gp.stackguard0 = stackPreempt } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:52:17 UTC 2024 - 19.3K bytes - Viewed (0) -
src/runtime/mgcsweep.go
gp := getg() if gp.m.locks == 0 && gp.m.mallocing == 0 && gp != gp.m.g0 { throw("mspan.sweep: m is not locked") } s := sl.mspan if !preserve { // We'll release ownership of this span. Nil it out to // prevent the caller from accidentally using it. sl.mspan = nil } sweepgen := mheap_.sweepgen if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:52:18 UTC 2024 - 32.9K bytes - Viewed (0) -
src/runtime/syscall_windows.go
// calls back into Go. c := &getg().m.winsyscall c.fn = fn c.n = uintptr(len(args)) if c.n != 0 { c.args = uintptr(noescape(unsafe.Pointer(&args[0]))) } cgocall(asmstdcallAddr, unsafe.Pointer(c)) // cgocall may reschedule us on to a different M, // but it copies the return values into the new M's // so we can read them from there. c = &getg().m.winsyscall return c.r1, c.r2, c.err
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 20:12:46 UTC 2024 - 16.6K bytes - Viewed (0) -
platforms/software/dependency-management/src/test/groovy/org/gradle/api/internal/catalog/LibrariesSourceGeneratorTest.groovy
bundle('myBundle', ['foo', 'bar']) plugin('pl', 'org.plugin') version('1.2') } then: def libs = sources.compile() def foo = libs.foo.get() def bar = libs.bar.get() assert foo.module.group == 'g' assert foo.module.name == 'a' assert foo.versionConstraint.requiredVersion == 'v' assert bar.module.group == 'g2'
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu Apr 18 08:26:24 UTC 2024 - 19.2K bytes - Viewed (0) -
src/cmd/link/internal/ld/lib.go
pkg := objabi.PathToPrefix(lib.Pkg) eof := f.Offset() + length start := f.Offset() c1 := bgetc(f) c2 := bgetc(f) c3 := bgetc(f) c4 := bgetc(f) f.MustSeek(start, 0) unit := &sym.CompilationUnit{Lib: lib} lib.Units = append(lib.Units, unit) magic := uint32(c1)<<24 | uint32(c2)<<16 | uint32(c3)<<8 | uint32(c4)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 21 18:45:27 UTC 2024 - 88.6K bytes - Viewed (0) -
src/runtime/malloc.go
} else if size&1 == 0 { align = 2 } else { align = 1 } } return persistentalloc(size, align, &memstats.other_sys) } if inittrace.active && inittrace.id == getg().goid { // Init functions are executed sequentially in a single goroutine. inittrace.allocs += 1 } } // assistG is the G to charge for this allocation, or nil if
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0) -
src/runtime/traceruntime.go
// buffer flushes are rare. Record the lock edge even if it doesn't happen // this time. lockRankMayTraceFlush() // Check if we're already locked. If so, return an invalid traceLocker. if getg().m.trace.seqlock.Load()%2 == 1 { return traceLocker{} } return traceAcquireEnabled() } // ok returns true if the traceLocker is valid (i.e. tracing is enabled). //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 25.7K bytes - Viewed (0) -
src/runtime/runtime.go
// from nanotime that we can use (some platforms have a really coarse system time granularity). // We require some amount of time to pass to ensure that the conversion rate is fairly accurate // in aggregate. But because we compute this rate lazily, there's a pretty good chance a decent // amount of time has passed by the time we get here. //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:16:47 UTC 2024 - 9.9K bytes - Viewed (0) -
src/runtime/mheap.go
type mSpanStateBox struct { s atomic.Uint8 } // It is nosplit to match get, below. //go:nosplit func (b *mSpanStateBox) set(s mSpanState) { b.s.Store(uint8(s)) } // It is nosplit because it's called indirectly by typedmemclr, // which must not be preempted. //go:nosplit func (b *mSpanStateBox) get() mSpanState { return mSpanState(b.s.Load()) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0)