- Sort: Score
- Results per page: 10
- Languages: All
Results 51 - 60 of 164 for Malloc (0.16 sec)
-
src/syscall/exec_freebsd.go
// (Pipe is close-on-exec so if exec succeeds, it will be closed.) // In the child, this function must not acquire any locks, because // they might have been locked at the time of the fork. This means // no rescheduling, no malloc calls, and no new stack segments. // For the same reason compiler does not race instrument it. // The calls to RawSyscall are okay because they are assembly // functions that do not grow the stack. // //go:norace
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 29 18:51:35 UTC 2023 - 8.4K bytes - Viewed (0) -
platforms/documentation/docs/src/snippets/native-binaries/google-test/groovy/libs/googleTest/1.7.0/include/gtest/internal/gtest-string.h
// delete[]. Returns the cloned string, or NULL if the input is // NULL. // // This is different from strdup() in string.h, which allocates // memory using malloc(). static const char* CloneCString(const char* c_str); #if GTEST_OS_WINDOWS_MOBILE // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Mon Nov 27 17:53:42 UTC 2023 - 6.8K bytes - Viewed (0) -
src/runtime/mcentral.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Central free lists. // // See malloc.go for an overview. // // The mcentral doesn't actually contain the list of free objects; the mspan does. // Each mcentral is two lists of mspans: those with free objects (c->nonempty) // and those that are completely allocated (c->empty). package runtime
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 8.1K bytes - Viewed (0) -
src/syscall/exec_bsd.go
// (Pipe is close-on-exec so if exec succeeds, it will be closed.) // In the child, this function must not acquire any locks, because // they might have been locked at the time of the fork. This means // no rescheduling, no malloc calls, and no new stack segments. // For the same reason compiler does not race instrument it. // The calls to RawSyscall are okay because they are assembly // functions that do not grow the stack. // //go:norace
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 29 18:51:35 UTC 2023 - 7.9K bytes - Viewed (0) -
src/syscall/exec_libc2.go
// (Pipe is close-on-exec so if exec succeeds, it will be closed.) // In the child, this function must not acquire any locks, because // they might have been locked at the time of the fork. This means // no rescheduling, no malloc calls, and no new stack segments. // For the same reason compiler does not race instrument it. // The calls to rawSyscall are okay because they are assembly // functions that do not grow the stack. // //go:norace
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 29 18:51:35 UTC 2023 - 8.2K bytes - Viewed (0) -
src/testing/allocs_test.go
var global any var allocsPerRunTests = []struct { name string fn func() allocs float64 }{ {"alloc *byte", func() { global = new(*byte) }, 1}, {"alloc complex128", func() { global = new(complex128) }, 1}, {"alloc float64", func() { global = new(float64) }, 1}, {"alloc int32", func() { global = new(int32) }, 1}, {"alloc byte", func() { global = new(byte) }, 1}, } func TestAllocsPerRun(t *testing.T) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Dec 13 18:45:54 UTC 2021 - 817 bytes - Viewed (0) -
staging/src/k8s.io/apiserver/pkg/util/flowcontrol/conc_alloc_test.go
} } else if alloc >= item.upperBound { if allocs[idx] != item.upperBound { t.Fatalf("For requiredSum=%v, %s classes=%#+v got solution %v, %v in which item %d should be its upper bound but is not", requiredSum, style, classes, allocs, fairProp, idx) } } else if f64RelDiff(alloc, allocs[idx]) > fpSlack {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon Nov 07 18:17:27 UTC 2022 - 5.3K bytes - Viewed (0) -
src/runtime/mpagecache.go
return c.base + i*pageSize, uintptr(scav) * pageSize } return c.allocN(npages) } // allocN is a helper which attempts to allocate npages worth of pages // from the cache. It represents the general case for allocating from // the page cache. // // Returns a base address and the amount of scavenged memory in the // allocated region in bytes. func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Apr 19 14:30:00 UTC 2023 - 5.6K bytes - Viewed (0) -
src/strings/builder_test.go
} }) } func TestBuilderGrowSizeclasses(t *testing.T) { s := Repeat("a", 19) allocs := testing.AllocsPerRun(100, func() { var b Builder b.Grow(18) b.WriteString(s) _ = b.String() }) if allocs > 1 { t.Fatalf("unexpected amount of allocations: %v, want: 1", allocs) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Feb 19 19:51:15 UTC 2024 - 8.1K bytes - Viewed (0) -
src/runtime/pprof/protomem_test.go
} } } type opAlloc struct { buf [128]byte } type opCall struct { } var sink []byte func storeAlloc() { sink = make([]byte, 16) } func nonRecursiveGenericAllocFunction[CurrentOp any, OtherOp any](alloc bool) { if alloc { storeAlloc() } else { nonRecursiveGenericAllocFunction[OtherOp, CurrentOp](true)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 21 14:38:45 UTC 2024 - 6.7K bytes - Viewed (0)