- Sort: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 430 for free1 (0.05 sec)
-
src/runtime/metrics_test.go
} if b, c := len(objects.free.Buckets), len(objects.free.Counts); b != c+1 { t.Errorf("frees-by-size has wrong bucket or counts length: %d buckets, %d counts", b, c) } if len(objects.alloc.Buckets) != len(objects.free.Buckets) { t.Error("allocs-by-size and frees-by-size buckets don't match in length") } else if len(objects.alloc.Counts) != len(objects.free.Counts) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:52:17 UTC 2024 - 45K bytes - Viewed (0) -
src/runtime/runtime2.go
sudoglock mutex sudogcache *sudog // Central pool of available defer structs. deferlock mutex deferpool *_defer // freem is the list of m's waiting to be freed when their // m.exited is set. Linked through m.freelink. freem *m gcwaiting atomic.Bool // gc is waiting to run stopwait int32 stopnote note sysmonwait atomic.Bool sysmonnote note
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:57:37 UTC 2024 - 47.9K bytes - Viewed (0) -
platforms/documentation/docs/src/snippets/native-binaries/cunit/groovy/libs/cunit/2.1-2/include/CUnit/TestDB.h
* be created and added but the error code will be set to CUE_DUP_SUITE. * The duplicate suite will not be accessible by name.<br /><br /> * * NOTE - the CU_pSuite pointer returned should NOT BE FREED BY * THE USER. The suite is freed by the CUnit system when * CU_cleanup_registry() is called. <b>This function must not * be called during a test run (checked by assertion)</b>. <br /><br /> *
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Mon Nov 27 17:53:42 UTC 2023 - 40.4K bytes - Viewed (0) -
src/runtime/stack.go
mheap_.freeManual(s, spanAllocStack) } s = next } unlock(&stackpool[order].item.mu) } // Free large stack spans. lock(&stackLarge.lock) for i := range stackLarge.free { for s := stackLarge.free[i].first; s != nil; { next := s.next stackLarge.free[i].remove(s) osStackFree(s) mheap_.freeManual(s, spanAllocStack) s = next } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 41.1K bytes - Viewed (0) -
src/runtime/mspanset.go
// allows this to be used in the memory manager and avoids the // need for write barriers on all of these. spanSetBlocks are // managed in a pool, though never freed back to the operating // system. We never release spine memory because there could be // concurrent lock-free access and we're likely to reuse it // anyway. (In principle, we could do this during STW.) spineLock mutex
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 13.1K bytes - Viewed (0) -
src/runtime/mcache.go
// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate // with the stealing of gcworkbufs during garbage collection to avoid // a race where the workbuf is double-freed. // gcworkbuffree(c.gcworkbuf) lock(&mheap_.lock) mheap_.cachealloc.free(unsafe.Pointer(c)) unlock(&mheap_.lock) }) } // getMCache is a convenience function which tries to obtain an mcache. //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 10K bytes - Viewed (0) -
src/runtime/arena_test.go
} arena.Free() }) t.Run("[]struct{}", func(t *testing.T) { arena := NewUserArena() var sl []struct{} arena.Slice(&sl, 10) if v := unsafe.Pointer(&sl[0]); v != ZeroBase { t.Errorf("expected zero-sized type to be allocated as zerobase: got %x, want %x", v, ZeroBase) } arena.Free() }) t.Run("[]int (cap 0)", func(t *testing.T) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 13.4K bytes - Viewed (0) -
src/runtime/mfinal.go
// program. // // Finalizers are run in dependency order: if A points at B, both have // finalizers, and they are otherwise unreachable, only the finalizer // for A runs; once A is freed, the finalizer for B can run. // If a cyclic structure includes a block with a finalizer, that // cycle is not guaranteed to be garbage collected and the finalizer // is not guaranteed to run, because there is no ordering that
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 01:56:56 UTC 2024 - 19K bytes - Viewed (0) -
src/runtime/HACKING.md
multiples of the system page size, but it can be freed with sysFree. * persistentalloc combines multiple smaller allocations into a single sysAlloc to avoid fragmentation. However, there is no way to free persistentalloced objects (hence the name). * fixalloc is a SLAB-style allocator that allocates objects of a fixed size. fixalloced objects can be freed, but this memory can only be
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 13.9K bytes - Viewed (0) -
pkg/registry/core/service/portallocator/allocator_test.go
if _, ok := err.(*ErrNotInRange); !ok { t.Fatal(err) } if f := r.Free(); f != 1 { t.Errorf("unexpected free %d", f) } if f := r.Used(); f != 200 { t.Errorf("unexpected used %d", f) } if err := r.Allocate(released); err != nil { t.Fatal(err) } if f := r.Free(); f != 0 { t.Errorf("unexpected free %d", f) } if f := r.Used(); f != 201 { t.Errorf("unexpected used %d", f) }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed May 08 07:15:02 UTC 2024 - 14K bytes - Viewed (0)