- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 45 for Grown (0.05 sec)
-
platforms/core-runtime/build-operations/src/main/java/org/gradle/internal/operations/DefaultBuildOperationListenerManager.java
// which requires atomically getting an iterator and the size. // Moreover, we iterate this list far more often than we mutate, // making a (albeit home grown) copy-on-write strategy more appealing. private List<ProgressShieldingBuildOperationListener> listeners = Collections.emptyList(); private final Lock listenersLock = new ReentrantLock();
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Tue Oct 24 14:19:43 UTC 2023 - 5.3K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/telemetry/internal/counter/file.go
m.close() } else { m1 = m } } }() v, headOff, head, ok := m.lookup(name) for !ok { // Lookup found an invalid pointer, // perhaps because the file has grown larger than the mapping. limit := m.load32(m.hdrLen + limitOff) if int64(limit) <= int64(len(m.mapping.Data)) { // Mapping doesn't need to grow, so lookup found actual corruption. debugPrintf("corrupt1\n")
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 04 16:19:04 UTC 2024 - 18.2K bytes - Viewed (0) -
src/bytes/buffer_test.go
func (r panicReader) Read(p []byte) (int, error) { if r.panic { panic("oops") } return 0, io.EOF } // Make sure that an empty Buffer remains empty when // it is "grown" before a Read that panics func TestReadFromPanicReader(t *testing.T) { // First verify non-panic behaviour var buf Buffer i, err := buf.ReadFrom(panicReader{}) if err != nil { t.Fatal(err) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 26 13:31:36 UTC 2024 - 18.6K bytes - Viewed (0) -
src/cmd/link/link_test.go
the earth, and it would not be wonderful to meet a Megalosaurus, forty feet long or so, waddling like an elephantine lizard up Holborn Hill. Smoke lowering down from chimney-pots, making a soft black drizzle, with flakes of soot in it as big as full-grown snowflakes—gone into mourning, one might imagine, for the death of the sun. Dogs, undistinguishable in mire. Horses, scarcely better; splashed to their very blinkers. Foot passengers, jostling one another’s umbrellas in a general infection of ill temper,...
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 24 20:26:02 UTC 2024 - 43.5K bytes - Viewed (0) -
src/encoding/binary/binary.go
return 8, nil case []float32: return 4 * len(data), nil case []float64: return 8 * len(data), nil } return 0, nil } // ensure grows buf to length len(buf) + n and returns the grown buffer // and a slice starting at the original length of buf (that is, buf2[len(buf):]). func ensure(buf []byte, n int) (buf2, pos []byte) { l := len(buf) buf = slices.Grow(buf, n)[:l+n] return buf, buf[l:]
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 17:29:31 UTC 2024 - 23.4K bytes - Viewed (0) -
src/runtime/mgcscavenge.go
sc.value.Store(ssc.pack()) } // scavChunkData tracks information about a palloc chunk for // scavenging. It packs well into 64 bits. // // The zero value always represents a valid newly-grown chunk. type scavChunkData struct { // inUse indicates how many pages in this chunk are currently // allocated. // // Only the first 10 bits are used. inUse uint16
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes - Viewed (0) -
src/runtime/os_windows.go
// osPreemptExtEnter is called before entering external code that may // call ExitProcess. // // This must be nosplit because it may be called from a syscall with // untyped stack slots, so the stack must not be grown or scanned. // //go:nosplit func osPreemptExtEnter(mp *m) { for !atomic.Cas(&mp.preemptExtLock, 0, 1) { // An asynchronous preemption is in progress. It's not
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 26 22:55:25 UTC 2024 - 41.5K bytes - Viewed (0) -
src/runtime/mpagealloc.go
if b := (offAddr{base}); b.lessThan(p.searchAddr) { p.searchAddr = b } // Add entries into chunks, which is sparse, if needed. Then, // initialize the bitmap. // // Newly-grown memory is always considered scavenged. // Set all the bits in the scavenged bitmaps high. for c := chunkIndex(base); c < chunkIndex(limit); c++ { if p.chunks[c.l1()] == nil { // Create the necessary l2 entry.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 39.2K bytes - Viewed (0) -
src/runtime/time.go
func timeSleepUntil() int64 { next := int64(maxWhen) // Prevent allp slice changes. This is like retake. lock(&allpLock) for _, pp := range allp { if pp == nil { // This can happen if procresize has grown // allp but not yet created new Ps. continue } if w := pp.timers.wakeTime(); w != 0 { next = min(next, w) } } unlock(&allpLock) return next }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 29 14:36:24 UTC 2024 - 37.5K bytes - Viewed (0) -
src/runtime/mheap.go
heapArenaAlloc linearAlloc // arenaHints is a list of addresses at which to attempt to // add more heap arenas. This is initially populated with a // set of general hint addresses, and grown with the bounds of // actual heap arena ranges. arenaHints *arenaHint // arena is a pre-reserved space for allocating heap arenas // (the actual arenas). This is only used on 32-bit. arena linearAlloc
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0)