- Sort: Score
- Results per page: 10
- Languages: All
Results 61 - 70 of 1,032 for Allocate (0.16 sec)
-
src/runtime/os_openbsd.go
// Called on the parent thread (main thread in case of bootstrap), can allocate memory. func mpreinit(mp *m) { gsignalSize := int32(32 * 1024) if GOARCH == "mips64" { gsignalSize = int32(64 * 1024) } mp.gsignal = malg(gsignalSize) mp.gsignal.m = mp } // Called to initialize a new m (including the bootstrap m). // Called on the new thread, can not allocate memory. func minit() { getg().m.procid = uint64(getthrid())
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 6.2K bytes - Viewed (0) -
platforms/core-runtime/process-services/src/main/java/org/gradle/process/internal/health/memory/WindowsOsMemoryInfo.java
// Note: the commit limit is usually less than the hard limit of the commit peak, but I think it would be prudent // for us to not force the user's OS to allocate more page file space, so we'll use the commit limit here. windowsMemoryInfo.getCommitLimit(), availableCommitMemory(windowsMemoryInfo) ); } else {
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Wed Dec 20 23:56:19 UTC 2023 - 2.7K bytes - Viewed (0) -
src/runtime/mheap.go
// to safely deal with potentially invalid pointers, since resolving // such pointers may race with a span being allocated. type mSpanState uint8 const ( mSpanDead mSpanState = iota mSpanInUse // allocated for garbage collected heap mSpanManual // allocated for manual management (e.g., stack allocator) ) // mSpanStateNames are the names of the span states, indexed by // mSpanState.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0) -
src/cmd/compile/internal/ssagen/abi.go
// need to allocate stack space, so it should be OK to mark them // as NOSPLIT in these cases. In addition, my assumption is that // functions written in assembly are NOSPLIT in most (but not all) // cases. In the case of an ABIInternal target that has too many // parameters to fit into registers, the wrapper would need to // allocate stack space, but this seems like an unlikely scenario.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 19:57:43 UTC 2024 - 13.8K bytes - Viewed (0) -
pkg/kubelet/cm/cpumanager/cpu_manager_test.go
pod := makePod("fakePod", "fakeContainer", "2", "2") container := &pod.Spec.Containers[0] mgr.activePods = func() []*v1.Pod { return nil } err := mgr.Allocate(pod, container) if !reflect.DeepEqual(err, testCase.expAllocateErr) { t.Errorf("CPU Manager Allocate() error (%v). expected error: %v but got: %v", testCase.description, testCase.expAllocateErr, err) } mgr.AddContainer(pod, container, "fakeID")
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Fri Oct 06 13:16:15 UTC 2023 - 42.9K bytes - Viewed (0) -
pkg/kubelet/cm/cpumanager/cpu_assignment.go
// NUMA nodes to allocate any 'remainder' CPUs from (in cases where the total // number of CPUs to allocate cannot be evenly distributed across the chosen // set of NUMA nodes). This "balance score" is calculated as the standard // deviation of how many CPUs will be available on each NUMA node after all // evenly distributed and remainder CPUs are allocated. The subset with the
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu Jan 25 23:56:21 UTC 2024 - 36.3K bytes - Viewed (0) -
src/runtime/mgcwork.go
if b != nil { b.checkempty() } } // Record that this may acquire the wbufSpans or heap lock to // allocate a workbuf. lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans) lockWithRankMayAcquire(&mheap_.lock, lockRankMheap) if b == nil { // Allocate more workbufs. var s *mspan if work.wbufSpans.free.first != nil { lock(&work.wbufSpans.lock) s = work.wbufSpans.free.first
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 12.9K bytes - Viewed (0) -
platforms/core-runtime/base-services/src/main/java/org/gradle/internal/work/WorkerLeaseRegistry.java
package org.gradle.internal.work; import org.gradle.internal.resources.ResourceLock; /** * Used to obtain and release worker leases to run work. There are a limited number of leases available and this service is used to allocate these to worker threads. * * Used where the operation cannot be packaged as a unit of work, for example when the operation is started and completed in response to separate * events. */
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Fri Sep 22 08:48:02 UTC 2023 - 2K bytes - Viewed (0) -
src/runtime/mklockrank.go
traceBuf < traceStrings; # Malloc allg, allocmR, allp, # procresize execR, # May grow stack execW, # May allocate after BeforeFork hchan, notifyList, reflectOffs, timer, traceStrings, userArenaState # Above MALLOC are things that can allocate memory. < MALLOC # Below MALLOC is the malloc implementation. < fin, spanSetSpine, mspanSpecial, traceTypeTab,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:47:01 UTC 2024 - 9.1K bytes - Viewed (0) -
src/runtime/malloc.go
// persistentChunkSize is the number of bytes we allocate when we grow // a persistentAlloc. const persistentChunkSize = 256 << 10 // persistentChunks is a list of all the persistent chunks we have // allocated. The list is maintained through the first word in the // persistent chunk. This is updated atomically. var persistentChunks *notInHeap // Wrapper around sysAlloc that can allocate small chunks.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0)