Results 11 - 20 of 176 for aligned (0.2 sec)
pkg/kubelet/cm/devicemanager/manager.go
if err != nil {
    return nil, err
}
if allocateRemainingFrom(preferred.Intersection(aligned)) {
    return allocated, nil
}
// Then fallback to allocate from the aligned set if no preferred list
// is returned (or not enough devices are returned in that list).
if allocateRemainingFrom(aligned) {
    return allocated, nil
}
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon Apr 15 12:01:56 UTC 2024 - 43K bytes
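The snippet above tries devices that are both preferred and NUMA-aligned first, and only then falls back to any aligned device. A minimal, self-contained sketch of that two-tier fallback; the tiny set type and the allocateRemainingFrom closure here are stand-ins, not the kubelet's real API:

package main

import "fmt"

// set is a tiny string set standing in for the kubelet's device sets.
type set map[string]struct{}

func (s set) Intersection(other set) set {
    out := set{}
    for k := range s {
        if _, ok := other[k]; ok {
            out[k] = struct{}{}
        }
    }
    return out
}

func main() {
    needed := 2
    allocated := []string{}

    // allocateRemainingFrom takes devices from candidates until the
    // request is satisfied; it reports whether allocation is complete.
    allocateRemainingFrom := func(candidates set) bool {
        for d := range candidates {
            if len(allocated) == needed {
                break
            }
            allocated = append(allocated, d)
        }
        return len(allocated) == needed
    }

    aligned := set{"gpu0": {}, "gpu1": {}, "gpu2": {}}
    preferred := set{"gpu1": {}}

    // First pass: devices that are both preferred and aligned.
    if !allocateRemainingFrom(preferred.Intersection(aligned)) {
        // Fallback: any aligned device.
        allocateRemainingFrom(aligned)
    }
    fmt.Println(allocated)
}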
src/runtime/traceregion.go
    off atomic.Uintptr
}

const traceRegionAllocBlockData = 64<<10 - unsafe.Sizeof(traceRegionAllocBlockHeader{})

// alloc allocates n-byte block. The block is always aligned to 8 bytes,
// regardless of platform.
func (a *traceRegionAlloc) alloc(n uintptr) *notInHeap {
    n = alignUp(n, 8)
    if n > traceRegionAllocBlockData {
        throw("traceRegion: alloc too large")
    }
    if a.dropping.Load() {
        throw("traceRegion: alloc with concurrent drop")
    }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:47:01 UTC 2024 - 3.2K bytes
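alignUp(n, 8) rounds the request up to the next multiple of 8. The runtime's helper uses the standard power-of-two bit trick; a sketch of that shape:

package main

import "fmt"

// alignUp rounds n up to a multiple of a, where a must be a power of two.
// Adding a-1 carries n past the next boundary; masking with &^ (a - 1)
// clears the low bits, snapping back down onto that boundary.
func alignUp(n, a uintptr) uintptr {
    return (n + a - 1) &^ (a - 1)
}

func main() {
    for _, n := range []uintptr{1, 7, 8, 9} {
        fmt.Println(n, "->", alignUp(n, 8)) // 8, 8, 8, 16
    }
}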
src/runtime/malloc.go
        p = alignUp(p, align)
        p2 := sysReserve(unsafe.Pointer(p), size)
        if p != uintptr(p2) {
            // Must have raced. Try again.
            sysFreeOS(p2, size)
            if retries++; retries == 100 {
                throw("failed to allocate aligned heap memory; too many retries")
            }
            goto retry
        }
        // Success.
        return p2, size
    default:
        // Trim off the unaligned parts.
        pAligned := alignUp(p, align)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes
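The default branch takes the other strategy: instead of retrying a racy targeted reservation, it reserves size+align bytes anywhere and trims the unaligned parts. A sketch of just the trim arithmetic, with a made-up base address and no real memory mapping:

package main

import "fmt"

func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

func main() {
    const size, align uintptr = 4 << 10, 1 << 10

    // Pretend the OS returned this unaligned base for size+align bytes.
    p := uintptr(0x7f0000000123)
    pAligned := alignUp(p, align)

    head := pAligned - p                           // unaligned prefix to release
    tail := (p + size + align) - (pAligned + size) // unaligned suffix to release
    fmt.Printf("base=%#x aligned=%#x head=%d tail=%d\n", p, pAligned, head, tail)
}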
src/internal/runtime/atomic/atomic_mips64x.s
TEXT ·Or8(SB), NOSPLIT, $0-9
    MOVV    ptr+0(FP), R1
    MOVBU   val+8(FP), R2
    // Align ptr down to 4 bytes so we can use 32-bit load/store.
    MOVV    $~3, R3
    AND     R1, R3
    // Compute val shift.
#ifdef GOARCH_mips64
    // Big endian. ptr = ptr ^ 3
    XOR     $3, R1
#endif
    // R4 = ((ptr & 3) * 8)
    AND     $3, R1, R4
    SLLV    $3, R4
    // Shift val for aligned ptr. R2 = val << R4
    SLLV    R4, R2
    SYNC
    LL      (R3), R4
    OR      R2, R4
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 11 21:29:34 UTC 2024 - 7.2K bytes
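MIPS64 has no 1-byte LL/SC, so the routine emulates an atomic byte OR on the aligned 4-byte word that contains it: align the pointer down, shift the byte value into position, OR the whole word. The index math in Go, with the endian flip mirroring the #ifdef above (addresses here are illustrative):

package main

import "fmt"

func main() {
    const bigEndian = false // GOARCH=mips64 is big endian; mips64le is not

    addr := uintptr(0x1002) // byte we want to OR into
    word := addr &^ 3       // containing 4-byte word, aligned down
    idx := addr & 3         // byte index within the word
    if bigEndian {
        idx ^= 3 // big endian stores byte 0 in the high lane
    }
    shift := idx * 8 // bit offset of the byte within the word

    val := uint32(0x80)
    fmt.Printf("word=%#x, OR mask=%#x\n", word, val<<shift)
}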
src/runtime/stubs.go
//
// *ptr is uninitialized memory (e.g., memory that's being reused
// for a new allocation) and hence contains only "junk".
//
// memclrNoHeapPointers ensures that if ptr is pointer-aligned, and n
// is a multiple of the pointer size, then any pointer-aligned,
// pointer-sized portion is cleared atomically. Despite the function
// name, this is necessary because this function is the underlying
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 20.2K bytes
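The atomicity guarantee matters because a concurrent observer (such as the GC) may inspect memory mid-clear: a pointer-aligned, pointer-sized slot must never hold a torn, half-written value. A sketch of clearing one such slot with an atomic store, purely as an illustration of the property rather than the runtime's implementation:

package main

import (
    "fmt"
    "sync/atomic"
    "unsafe"
)

func main() {
    var slot unsafe.Pointer = unsafe.Pointer(new(int))

    // A single atomic store of a pointer-aligned, pointer-sized word
    // means a concurrent reader sees either the old pointer or nil,
    // never a mixture of the two.
    atomic.StorePointer(&slot, nil)
    fmt.Println(slot == nil) // true
}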
src/runtime/syscall_windows.go
        // caller reserved spill space.
        p.dstSpill = alignUp(p.dstSpill, uintptr(t.Align_))
        p.dstSpill += t.Size_
    } else {
        // Register assignment failed.
        // Undo the work and stack assign.
        p.parts = oldParts

        // The Go ABI aligns arguments.
        p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.Align_))

        // Copy just the size of the argument. Note that this
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 20:12:46 UTC 2024 - 16.6K bytes
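Stack assignment rounds the running offset up to each argument's alignment before adding its size, which is all "the Go ABI aligns arguments" amounts to here. A sketch of laying out a few arguments this way; the sizes and alignments are made up:

package main

import "fmt"

func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

type arg struct{ size, align uintptr }

func main() {
    args := []arg{{1, 1}, {8, 8}, {2, 2}, {4, 4}}

    var dstStackSize uintptr
    for _, t := range args {
        // Align the offset before placing each argument.
        dstStackSize = alignUp(dstStackSize, t.align)
        fmt.Printf("arg at offset %d\n", dstStackSize) // 0, 8, 16, 20
        dstStackSize += t.size
    }
    fmt.Println("frame bytes:", dstStackSize) // 24
}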
src/runtime/mgcscavenge.go
    } else if minimum > maxPagesPerPhysPage {
        print("runtime: min = ", minimum, "\n")
        throw("min too large")
    }
    // max may not be min-aligned, so we might accidentally truncate to
    // a max value which causes us to return a non-min-aligned value.
    // To prevent this, align max up to a multiple of min (which is always
    // a power of 2). This also prevents max from ever being less than
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes
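A worked example of the failure the comment describes, with illustrative numbers: truncating a run of pages to an unaligned max could yield a count that is not a multiple of min, so max is rounded up to a min multiple first.

package main

import "fmt"

func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

func main() {
    const min uintptr = 4 // always a power of two
    max := uintptr(10)

    // Truncating a count to max=10 could return 10, which is not a
    // multiple of min=4. Rounding max up to 12 keeps any value
    // truncated against it compatible with min alignment.
    fmt.Println(alignUp(max, min)) // 12
}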
src/sync/atomic/doc.go
// atomic functions (types [Int64] and [Uint64] are automatically aligned).
// The first word in an allocated struct, array, or slice; in a global
// variable; or in a local variable (because the subject of all atomic operations
// will escape to the heap) can be relied upon to be 64-bit aligned.

// SwapInt32 atomically stores new into *addr and returns the previous *addr value.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 21:14:51 UTC 2024 - 11.7K bytes
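On 32-bit platforms a plain int64 struct field is only guaranteed 4-byte alignment, which is why the doc steers users toward the wrapper types: atomic.Int64 and atomic.Uint64 carry the 64-bit alignment guarantee themselves, regardless of where they sit in a struct. A short usage sketch:

package main

import (
    "fmt"
    "sync/atomic"
)

type counters struct {
    name string       // fields before the counter need no careful ordering:
    hits atomic.Int64 // atomic.Int64 is automatically 64-bit aligned
}

func main() {
    var c counters
    c.hits.Add(1)
    fmt.Println(c.hits.Load()) // 1
}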
tensorflow/compiler/aot/codegen_test_h.golden
// ((unknown): f32[1,2], (unknown): s64[3,4], (unknown): f32[1], (unknown): f32[1], (unknown): s32[5]) -> (u32[5,6], f32[1], s32[5])
//
// Memory stats:
//   arg bytes total:    392
//   arg bytes aligned:  576
//   temp bytes total:   171
//   temp bytes aligned: 512
class MyClass final : public tensorflow::XlaCompiledCpuFunction {
 public:
  // Number of input arguments for the compiled computation.
  static constexpr size_t kNumArgs = 5;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 01:20:01 UTC 2024 - 16.6K bytes
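The "aligned" figures are the totals after padding each buffer up to the generated code's alignment boundary, which is why they exceed the raw totals. A hedged sketch of that bookkeeping; the boundary and per-buffer sizes below are assumptions chosen only to show the computation, and do not reproduce the golden file's numbers:

package main

import "fmt"

func alignUp(n, a int) int { return (n + a - 1) / a * a }

func main() {
    const boundary = 64 // assumed alignment quantum

    bufs := []int{8, 96, 4, 4, 20} // assumed per-buffer byte sizes
    total, aligned := 0, 0
    for _, b := range bufs {
        total += b
        aligned += alignUp(b, boundary)
    }
    fmt.Println("bytes total:", total, "bytes aligned:", aligned)
}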
src/runtime/mheap.go
    // On some platforms we need to provide physical page aligned stack
    // allocations. Where the page size is less than the physical page
    // size, we already manage to do this by default.
    needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize

    // If the allocation is small enough, try the page cache!
    // The page cache does not support aligned allocations, so we cannot use
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes
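When the runtime page size is smaller than the hardware page size, a stack span's base must be pushed up to a physical-page boundary, which in practice means over-allocating and trimming. A sketch of the base adjustment; the constants and address are illustrative, and the real check also involves the stack-span conditions shown above:

package main

import "fmt"

func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

func main() {
    const pageSize uintptr = 8 << 10      // runtime page size
    const physPageSize uintptr = 16 << 10 // hardware page size (illustrative)

    needPhysPageAlign := pageSize < physPageSize // plus the stack-span checks
    base := uintptr(0x6000)                      // page-aligned, not phys-page-aligned

    if needPhysPageAlign {
        base = alignUp(base, physPageSize)
    }
    fmt.Printf("stack base=%#x\n", base)
}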