- Sort Score
- Results per page: 10
- Languages All
Results 41 - 50 of 168 for Malloc (0.1 sec)
-
src/runtime/os_windows.go
for p[0] != 0 { p = p[1:] } p = p[1:] // skip nil byte } stdcall1(_FreeEnvironmentStringsW, uintptr(strings)) // We call these all the way here, late in init, so that malloc works // for the callback functions these generate. var fn any = ctrlHandler ctrlHandlerPC := compileCallback(*efaceOf(&fn), true) stdcall2(_SetConsoleCtrlHandler, ctrlHandlerPC, 1) monitorSuspendResume()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 26 22:55:25 UTC 2024 - 41.5K bytes - Viewed (0) -
src/runtime/stack.go
// hchan locks. Normally, we only allow acquiring hchan // locks and then getting a gscan bit. In this case, we // already have the gscan bit. We allow acquiring hchan // locks here as a special case, since a deadlock can't // happen because the G involved must already be // suspended. So, we get a special hchan lock rank here // that is lower than gscan, but doesn't allow acquiring // any other locks other than hchan.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 41.1K bytes - Viewed (0) -
src/path/filepath/path_test.go
} if testing.Short() { t.Skip("skipping malloc count in short mode") } if runtime.GOMAXPROCS(0) > 1 { t.Log("skipping AllocsPerRun checks; GOMAXPROCS>1") return } for _, test := range tests { allocs := testing.AllocsPerRun(100, func() { filepath.Clean(test.result) }) if allocs > 0 { t.Errorf("Clean(%q): %v allocs, want zero", test.result, allocs) } } } type IsLocalTest struct {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 22 16:38:19 UTC 2024 - 47.1K bytes - Viewed (0) -
src/runtime/signal_unix.go
// signals then delivers them itself by calling // the signal handler directly when C code, // including C code called via cgo, calls a // TSAN-intercepted function such as malloc. // // We check this condition last as g0.stack.lo // may be not very accurate (see mstart). st := stackt{ss_size: mp.g0.stack.hi - mp.g0.stack.lo} setSignalstackSP(&st, mp.g0.stack.lo)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 10 16:04:54 UTC 2024 - 45K bytes - Viewed (0) -
src/runtime/runtime2.go
// to allocate gcAssistBytes bytes without assisting. If this // is negative, then the G must correct this by performing // scan work. We track this in bytes to make it fast to update // and check for debt in the malloc hot path. The assist ratio // determines how this corresponds to scan work debt. gcAssistBytes int64 } // gTrackingPeriod is the number of transitions out of _Grunning between // latency tracking runs.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:57:37 UTC 2024 - 47.9K bytes - Viewed (0) -
src/cmd/compile/internal/liveness/plive.go
livedefer.Set(int32(i)) } if n.IsOutputParamHeapAddr() { // This variable will be overwritten early in the function // prologue (from the result of a mallocgc) but we need to // zero it in case that malloc causes a stack scan. n.SetNeedzero(true) livedefer.Set(int32(i)) } if n.OpenDeferSlot() { // Open-coded defer args slots must be live // everywhere in a function, since a panic can
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 15:22:22 UTC 2024 - 45.2K bytes - Viewed (0) -
src/internal/trace/testdata/tests/go122-annotations-stress.test
data="runtime.(*wakeableSleep).sleep" String id=143 data="runtime.traceStartReadCPU.func1" String id=144 data="runtime.newobject" String id=145 data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/malloc.go" String id=146 data="sync.(*Mutex).Unlock" String id=147 data="runtime/trace.Start.func1" String id=148 data="runtime.traceLocker.Gomaxprocs" String id=149
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 38.3K bytes - Viewed (0) -
src/runtime/mpagealloc_test.go
nAlloc := (PallocChunkPages * 4) / int(npages) for i := 0; i < nAlloc; i++ { addr := PageBase(BaseChunkIdx, uint(i)*uint(npages)) if a, _ := b.Alloc(npages); a != addr { t.Fatalf("bad alloc #%d: want 0x%x, got 0x%x", i+1, addr, a) } } // Check to make sure the next allocation fails. if a, _ := b.Alloc(npages); a != 0 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Dec 06 19:16:48 UTC 2021 - 32.6K bytes - Viewed (0) -
src/runtime/mpallocbits.go
return (*pageBits)(b).block64(i) } // allocPages64 allocates a 64-bit block of 64 pages aligned to 64 pages according // to the bits set in alloc. The block set is the one containing the i'th page. func (b *pallocBits) allocPages64(i uint, alloc uint64) { (*pageBits)(b).setBlock64(i, alloc) } // findBitRange64 returns the bit index of the first set of // n consecutive 1 bits. If no consecutive set of 1 bits of
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 18 15:13:43 UTC 2024 - 12.5K bytes - Viewed (0) -
src/runtime/metrics_test.go
t.Error("allocs-by-size and frees-by-size counts don't match in length") } else { for i := range objects.alloc.Buckets { ba := objects.alloc.Buckets[i] bf := objects.free.Buckets[i] if ba != bf { t.Errorf("bucket %d is different for alloc and free hists: %f != %f", i, ba, bf) } } if !t.Failed() { var gotAlloc, gotFree uint64 want := objects.total for i := range objects.alloc.Counts {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:52:17 UTC 2024 - 45K bytes - Viewed (0)