Results 1 - 10 of 137 for aligned (0.2 sec)
src/runtime/mpallocbits.go
}

// pages64 returns a 64-bit bitmap representing a block of 64 pages aligned
// to 64 pages. The returned block of pages is the one containing the i'th
// page in this pallocBits. Each bit represents whether the page is in-use.
func (b *pallocBits) pages64(i uint) uint64 {
	return (*pageBits)(b).block64(i)
}

// allocPages64 allocates a 64-bit block of 64 pages aligned to 64 pages according
Last Modified: Sat May 18 15:13:43 UTC 2024 - 12.5K bytes
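
Read as a plain data-structure question, the snippet above reduces to simple indexing: when the bitmap is backed by a []uint64, the 64-page-aligned block containing page i is the element at index i/64. Below is a minimal sketch of that idea; pageBitmap and its methods are invented for illustration and are not the runtime's actual pageBits implementation.

package main

import "fmt"

// pageBitmap is a hypothetical stand-in for the runtime's pageBits type:
// each uint64 covers 64 pages, so every 64-page block boundary falls exactly
// on an element boundary.
type pageBitmap []uint64

// block64 returns the 64-bit block containing the i'th page.
func (b pageBitmap) block64(i uint) uint64 {
	return b[i/64]
}

// pages64 mirrors the snippet above: the in-use bitmap for the aligned block
// of 64 pages that contains page i.
func (b pageBitmap) pages64(i uint) uint64 {
	return b.block64(i)
}

func main() {
	b := make(pageBitmap, 2) // 128 pages
	b[1] = 0b1011            // pages 64, 65, 67 marked in-use
	fmt.Printf("%b\n", b.pages64(70)) // prints 1011
}
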
internal/ioutil/ioutil.go
// directio.AlignSize is defined as 0 in MacOS causing divide by 0 error.
const DirectioAlignSize = 4096

// CopyAligned - copies from reader to writer using the aligned input
// buffer, it is expected that input buffer is page aligned to
// 4K page boundaries. Without passing aligned buffer may cause
// this function to return error.
//
// This code is similar in spirit to io.Copy but it is only to be
Last Modified: Wed May 22 23:07:14 UTC 2024 - 10.2K bytes
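
As a rough sketch of what the CopyAligned comment describes (this is not MinIO's implementation, which also deals with O_DIRECT alignment of the buffer's address and the unaligned tail of an object), assume only that the staging buffer's length is a multiple of the 4K alignment, so every full write starts on a 4K boundary:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

const directioAlignSize = 4096 // 4K page boundary, as in the snippet above

// copyAligned copies r to w through buf, whose length must be a multiple of
// the alignment. It is io.Copy-like but rejects misaligned buffers up front.
func copyAligned(w io.Writer, r io.Reader, buf []byte) (int64, error) {
	if len(buf)%directioAlignSize != 0 {
		return 0, fmt.Errorf("buffer length %d is not 4K-aligned", len(buf))
	}
	var written int64
	for {
		n, err := io.ReadFull(r, buf)
		if n > 0 {
			nw, werr := w.Write(buf[:n])
			written += int64(nw)
			if werr != nil {
				return written, werr
			}
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			return written, nil
		}
		if err != nil {
			return written, err
		}
	}
}

func main() {
	var dst bytes.Buffer
	src := strings.NewReader(strings.Repeat("x", 10000))
	n, err := copyAligned(&dst, src, make([]byte, directioAlignSize))
	fmt.Println(n, err) // 10000 <nil>
}
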
pkg/kubelet/cm/devicemanager/topology_hints.go
	m.mutex.Lock()
	defer m.mutex.Unlock()
	for resource, requested := range accumulatedResourceRequests {
		// Only consider devices that actually contain topology information.
		if aligned := m.deviceHasTopologyAlignment(resource); !aligned {
			klog.InfoS("Resource does not have a topology preference", "resource", resource)
			deviceHints[resource] = nil
			continue
		}
Last Modified: Sat Jan 27 02:10:25 UTC 2024 - 9.9K bytes
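
The pattern in this snippet is easy to restate outside the kubelet: resources whose devices carry no topology information get a nil hint (meaning "no preference") and are skipped, and only aligned resources get real hints computed. The sketch below is hypothetical; hasTopologyAlignment and the resource names are made up and this is not the device manager's real code.

package main

import "fmt"

// TopologyHint is a simplified stand-in for the kubelet's topology hint type.
type TopologyHint struct{ NUMANodes []int }

// hasTopologyAlignment stands in for m.deviceHasTopologyAlignment(resource).
func hasTopologyAlignment(resource string) bool {
	return resource == "vendor.com/gpu" // assumption for the example
}

func main() {
	requests := map[string]int{"vendor.com/gpu": 2, "vendor.com/nic": 1}
	hints := map[string][]TopologyHint{}
	for resource := range requests {
		if !hasTopologyAlignment(resource) {
			// Mirrors deviceHints[resource] = nil in the snippet: no preference.
			hints[resource] = nil
			continue
		}
		hints[resource] = []TopologyHint{{NUMANodes: []int{0}}}
	}
	fmt.Println(hints)
}
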
src/internal/runtime/atomic/types.go
Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 14.2K bytes
pkg/kubelet/cm/devicemanager/manager.go
	if err != nil {
		return nil, err
	}
	if allocateRemainingFrom(preferred.Intersection(aligned)) {
		return allocated, nil
	}
	// Then fallback to allocate from the aligned set if no preferred list
	// is returned (or not enough devices are returned in that list).
	if allocateRemainingFrom(aligned) {
		return allocated, nil
	}
Last Modified: Mon Apr 15 12:01:56 UTC 2024 - 43K bytes
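
The allocation order in this snippet, preferred devices that are also aligned first, then any remaining aligned devices, can be sketched with plain Go maps instead of the sets package the device manager actually uses. Everything below (the device names, allocateRemainingFrom written as a closure) is illustrative only.

package main

import "fmt"

// intersect returns the devices present in both sets.
func intersect(a, b map[string]bool) map[string]bool {
	out := map[string]bool{}
	for d := range a {
		if b[d] {
			out[d] = true
		}
	}
	return out
}

func main() {
	aligned := map[string]bool{"dev0": true, "dev1": true, "dev2": true}
	preferred := map[string]bool{"dev1": true, "dev3": true}
	needed := 2
	var allocated []string

	// allocateRemainingFrom takes devices from the given set until the
	// request is satisfied, reporting whether it is now fully satisfied.
	allocateRemainingFrom := func(from map[string]bool) bool {
		for d := range from {
			if len(allocated) == needed {
				break
			}
			allocated = append(allocated, d)
			delete(aligned, d) // don't hand out the same device twice
		}
		return len(allocated) == needed
	}

	// Preferred devices that are also topology-aligned come first.
	if allocateRemainingFrom(intersect(preferred, aligned)) {
		fmt.Println(allocated)
		return
	}
	// Then fall back to the aligned set, as in the snippet.
	if allocateRemainingFrom(aligned) {
		fmt.Println(allocated)
		return
	}
}
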
src/runtime/pinner.go
func (s *mspan) refreshPinnerBits() {
	p := s.getPinnerBits()
	if p == nil {
		return
	}

	hasPins := false
	bytes := alignUp(s.pinnerBitSize(), 8)

	// Iterate over each 8-byte chunk and check for pins. Note that
	// newPinnerBits guarantees that pinnerBits will be 8-byte aligned, so we
	// don't have to worry about edge cases, irrelevant bits will simply be
	// zero.
Last Modified: Thu Apr 04 14:29:45 UTC 2024 - 11K bytes
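
alignUp here is the runtime's round-up-to-a-power-of-two helper: alignUp(n, a) = (n + a - 1) &^ (a - 1). The sketch below shows that formula plus the "iterate over each 8-byte chunk" idea from the comment, applied to an ordinary byte slice rather than a real pinnerBits allocation.

package main

import "fmt"

// alignUp rounds n up to a multiple of a, where a is a power of two.
func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

func main() {
	fmt.Println(alignUp(13, 8)) // 16

	bits := make([]byte, alignUp(13, 8)) // padded so whole 8-byte chunks can be scanned
	bits[5] = 0x10                       // pretend one pin bit is set
	hasPins := false
	for i := 0; i < len(bits); i += 8 {
		// Assemble the chunk and check whether any bit in it is set;
		// padding bytes are simply zero, as the runtime comment notes.
		var chunk uint64
		for j := 0; j < 8; j++ {
			chunk |= uint64(bits[i+j]) << (8 * j)
		}
		if chunk != 0 {
			hasPins = true
			break
		}
	}
	fmt.Println(hasPins) // true
}
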
src/runtime/stubs.go
//
// *ptr is uninitialized memory (e.g., memory that's being reused
// for a new allocation) and hence contains only "junk".
//
// memclrNoHeapPointers ensures that if ptr is pointer-aligned, and n
// is a multiple of the pointer size, then any pointer-aligned,
// pointer-sized portion is cleared atomically. Despite the function
// name, this is necessary because this function is the underlying
Last Modified: Wed May 29 17:58:53 UTC 2024 - 20.2K bytes
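
The guarantee described above can be paraphrased: when the region is pointer-aligned and a whole number of pointers long, it can be cleared one pointer-sized store at a time, so a concurrent garbage collector never observes a half-written pointer slot. A trivial pure-Go analogue follows; the real memclrNoHeapPointers is assembly operating on raw memory, so this is only an illustration of the per-slot clearing idea.

package main

import "fmt"

// clearWords zeroes a pointer-aligned region one uintptr-sized store per slot.
func clearWords(words []uintptr) {
	for i := range words {
		words[i] = 0
	}
}

func main() {
	buf := []uintptr{0xdeadbeef, 0xcafebabe, 42} // "junk" contents
	clearWords(buf)
	fmt.Println(buf) // [0 0 0]
}
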
src/runtime/syscall_windows.go
			// caller reserved spill space.
			p.dstSpill = alignUp(p.dstSpill, uintptr(t.Align_))
			p.dstSpill += t.Size_
		} else {
			// Register assignment failed.
			// Undo the work and stack assign.
			p.parts = oldParts

			// The Go ABI aligns arguments.
			p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.Align_))

			// Copy just the size of the argument. Note that this
Last Modified: Wed May 22 20:12:46 UTC 2024 - 16.6K bytes
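
The stack-assignment branch above follows the usual ABI rule: round the running stack offset up to the argument's alignment, then advance it by the argument's size. A hedged sketch with made-up argument sizes (not the runtime's abiDesc machinery):

package main

import "fmt"

// alignUp rounds n up to a multiple of a, where a is a power of two.
func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

// argType is a hypothetical stand-in for the size/alignment of an argument type.
type argType struct {
	size, align uintptr
}

func main() {
	args := []argType{{size: 1, align: 1}, {size: 8, align: 8}, {size: 4, align: 4}}
	var stackSize uintptr
	for _, t := range args {
		off := alignUp(stackSize, t.align) // "The Go ABI aligns arguments."
		stackSize = off + t.size
		fmt.Printf("offset %d, next %d\n", off, stackSize)
	}
	// Prints offsets 0, 8, 16: the 8-byte argument forces 7 bytes of padding.
}
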
src/sync/atomic/doc.go
// atomic functions (types [Int64] and [Uint64] are automatically aligned).
// The first word in an allocated struct, array, or slice; in a global
// variable; or in a local variable (because the subject of all atomic operations
// will escape to the heap) can be relied upon to be 64-bit aligned.

// SwapInt32 atomically stores new into *addr and returns the previous *addr value.
Last Modified: Fri Jun 07 21:14:51 UTC 2024 - 11.7K bytes
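
The doc comment above points at two practical ways to satisfy the 64-bit alignment requirement on 32-bit platforms: make the 64-bit field the first word of the struct, or use the atomic.Int64 / atomic.Uint64 types, which are automatically aligned. A small sketch of both; counterA and counterB are examples, not from the package docs.

package main

import (
	"fmt"
	"sync/atomic"
)

type counterA struct {
	n     uint64 // first word of the allocated struct: relied upon to be 64-bit aligned
	label string
}

type counterB struct {
	label string
	n     atomic.Int64 // automatically aligned regardless of its position
}

func main() {
	a := &counterA{label: "a"}
	atomic.AddUint64(&a.n, 1)

	b := &counterB{label: "b"}
	b.n.Add(1)

	fmt.Println(atomic.LoadUint64(&a.n), b.n.Load()) // 1 1
}
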
tensorflow/compiler/aot/codegen_test_h.golden
//  ((unknown): f32[1,2], (unknown): s64[3,4], (unknown): f32[1], (unknown): f32[1], (unknown): s32[5]) -> (u32[5,6], f32[1], s32[5])
//
// Memory stats:
//   arg bytes total:    392
//   arg bytes aligned:  576
//   temp bytes total:   171
//   temp bytes aligned: 512
class MyClass final : public tensorflow::XlaCompiledCpuFunction {
 public:
  // Number of input arguments for the compiled computation.
  static constexpr size_t kNumArgs = 5;
Last Modified: Thu May 02 01:20:01 UTC 2024 - 16.6K bytes