Results 11 - 20 of 565 for free1 (0.1 sec)
src/runtime/metrics/doc.go
defined by /gc/heap/tiny/allocs:objects, only tiny blocks.

/gc/heap/frees-by-size:bytes
	Distribution of freed heap allocations by approximate size.
	Bucket counts increase monotonically. Note that this does not
	include tiny objects as defined by /gc/heap/tiny/allocs:objects,
	only tiny blocks.

/gc/heap/frees:bytes
	Cumulative sum of heap memory freed by the garbage collector.

/gc/heap/frees:objects
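The histogram documented above is exported through the public runtime/metrics package. A minimal sketch of reading it, assuming a Go version that exports /gc/heap/frees-by-size:bytes (the Kind check guards against older runtimes):

	package main

	import (
		"fmt"
		"runtime/metrics"
	)

	func main() {
		// Sample the frees-by-size histogram described in doc.go above.
		samples := []metrics.Sample{{Name: "/gc/heap/frees-by-size:bytes"}}
		metrics.Read(samples)

		if samples[0].Value.Kind() != metrics.KindFloat64Histogram {
			fmt.Println("metric not supported by this runtime")
			return
		}
		h := samples[0].Value.Float64Histogram()
		// Buckets holds boundaries, so len(h.Buckets) == len(h.Counts)+1;
		// this is the invariant checked in metrics_test.go below.
		for i, n := range h.Counts {
			fmt.Printf("[%g, %g) bytes: %d frees\n", h.Buckets[i], h.Buckets[i+1], n)
		}
	}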
src/runtime/mgcwork.go
	// ... list and move all workbuf spans to the free list.
	work.empty = 0
	work.wbufSpans.free.takeAll(&work.wbufSpans.busy)
	unlock(&work.wbufSpans.lock)
}

// freeSomeWbufs frees some workbufs back to the heap and returns
// true if it should be called again to free more.
func freeSomeWbufs(preemptible bool) bool {
	const batchSize = 64 // ~1–2 µs per span.
	lock(&work.wbufSpans.lock)
src/runtime/metrics_test.go
}
if b, c := len(objects.free.Buckets), len(objects.free.Counts); b != c+1 {
	t.Errorf("frees-by-size has wrong bucket or counts length: %d buckets, %d counts", b, c)
}
if len(objects.alloc.Buckets) != len(objects.free.Buckets) {
	t.Error("allocs-by-size and frees-by-size buckets don't match in length")
} else if len(objects.alloc.Counts) != len(objects.free.Counts) {
src/runtime/runtime2.go
sudoglock  mutex
sudogcache *sudog

// Central pool of available defer structs.
deferlock mutex
deferpool *_defer

// freem is the list of m's waiting to be freed when their
// m.exited is set. Linked through m.freelink.
freem *m

gcwaiting  atomic.Bool // gc is waiting to run
stopwait   int32
stopnote   note
sysmonwait atomic.Bool
sysmonnote note
platforms/documentation/docs/src/snippets/native-binaries/cunit/groovy/libs/cunit/2.1-2/include/CUnit/TestDB.h
 *  be created and added but the error code will be set to CUE_DUP_SUITE.
 *  The duplicate suite will not be accessible by name.
 *
 *  NOTE - the CU_pSuite pointer returned should NOT BE FREED BY
 *  THE USER. The suite is freed by the CUnit system when
 *  CU_cleanup_registry() is called. This function must not
 *  be called during a test run (checked by assertion).
 *
src/runtime/stack.go
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
src/runtime/mspanset.go
// ... allows this to be used in the memory manager and avoids the
// need for write barriers on all of these. spanSetBlocks are
// managed in a pool, though never freed back to the operating
// system. We never release spine memory because there could be
// concurrent lock-free access and we're likely to reuse it
// anyway. (In principle, we could do this during STW.)

spineLock mutex
src/runtime/mcache.go
		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
		// with the stealing of gcworkbufs during garbage collection to avoid
		// a race where the workbuf is double-freed.
		// gcworkbuffree(c.gcworkbuf)
		lock(&mheap_.lock)
		mheap_.cachealloc.free(unsafe.Pointer(c))
		unlock(&mheap_.lock)
	})
}

// getMCache is a convenience function which tries to obtain an mcache.
//
src/runtime/arena_test.go
	}
	arena.Free()
})
t.Run("[]struct{}", func(t *testing.T) {
	arena := NewUserArena()
	var sl []struct{}
	arena.Slice(&sl, 10)
	if v := unsafe.Pointer(&sl[0]); v != ZeroBase {
		t.Errorf("expected zero-sized type to be allocated as zerobase: got %x, want %x", v, ZeroBase)
	}
	arena.Free()
})
t.Run("[]int (cap 0)", func(t *testing.T) {
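NewUserArena, Slice, and ZeroBase in this test are internal test exports; the user-facing surface is the experimental arena package. A rough sketch of that public API, assuming a toolchain built with GOEXPERIMENT=arenas:

	//go:build goexperiment.arenas

	package main

	import (
		"arena"
		"fmt"
	)

	func main() {
		a := arena.NewArena()
		// Allocate a slice from the arena rather than the GC heap.
		sl := arena.MakeSlice[int](a, 10, 10)
		for i := range sl {
			sl[i] = i * i
		}
		fmt.Println(sl)
		// Free releases the arena's memory in one step; touching sl
		// afterwards is a use-after-free, which the runtime can detect.
		a.Free()
	}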
src/runtime/mfinal.go
// program.
//
// Finalizers are run in dependency order: if A points at B, both have
// finalizers, and they are otherwise unreachable, only the finalizer
// for A runs; once A is freed, the finalizer for B can run.
// If a cyclic structure includes a block with a finalizer, that
// cycle is not guaranteed to be garbage collected and the finalizer
// is not guaranteed to run, because there is no ordering that
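The dependency ordering described here can be observed with the public runtime.SetFinalizer API. A small sketch; finalizer timing is deliberately unspecified, so the repeated GC calls are only a best-effort way to see both run:

	package main

	import (
		"fmt"
		"runtime"
		"time"
	)

	type B struct{ id int }
	type A struct{ b *B }

	func main() {
		b := &B{id: 1}
		a := &A{b: b}
		// A points at B, so per the ordering above A's finalizer runs
		// first; only once A is freed can B's finalizer run.
		runtime.SetFinalizer(a, func(*A) { fmt.Println("finalizing A") })
		runtime.SetFinalizer(b, func(*B) { fmt.Println("finalizing B") })
		a, b = nil, nil

		for i := 0; i < 3; i++ {
			runtime.GC()
			time.Sleep(10 * time.Millisecond) // give the finalizer goroutine a chance
		}
	}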