Results 51 - 60 of 399 for aligned (0.18 sec)
- src/runtime/malloc.go
        p = alignUp(p, align)
        p2 := sysReserve(unsafe.Pointer(p), size)
        if p != uintptr(p2) {
            // Must have raced. Try again.
            sysFreeOS(p2, size)
            if retries++; retries == 100 {
                throw("failed to allocate aligned heap memory; too many retries")
            }
            goto retry
        }
        // Success.
        return p2, size
    default:
        // Trim off the unaligned parts.
        pAligned := alignUp(p, align)
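The retry path above leans on alignUp, which rounds an address up to a power-of-two boundary. A minimal standalone sketch of that arithmetic (illustrative; the runtime's own helper lives in its internal packages):

    package main

    import "fmt"

    // alignUp rounds n up to the next multiple of a, which must be a power
    // of two: adding a-1 carries any remainder past the boundary, and the
    // mask &^ (a-1) clears the low bits back down to it.
    func alignUp(n, a uintptr) uintptr {
        return (n + a - 1) &^ (a - 1)
    }

    func main() {
        fmt.Println(alignUp(100, 64)) // 128
        fmt.Println(alignUp(128, 64)) // 128: already aligned, unchanged
    }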
- tensorflow/compiler/jit/xla_tensor.cc
    }

    // The pointer tag, OR-ed into the XlaTensor's address to distinguish it from
    // device-side tensors, which are either CPU or GPU memory pointers. This works
    // because we're guaranteed that CPU and GPU pointers are aligned to > 1 bits.
    namespace {
    constexpr uintptr_t kTag = 0x1ULL;
    }

    /*static*/ XlaTensor* XlaTensor::FromOpaquePointer(void* ptr) {
      uintptr_t value = reinterpret_cast<uintptr_t>(ptr);
      if (value & kTag) {
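The tagging works because any pointer aligned to more than one byte has a zero low bit, leaving bit 0 free for a marker. The original is C++; below is a rough Go rendering of the same idea, with hypothetical function names:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // kTag occupies bit 0, which is guaranteed zero in any pointer aligned
    // to at least 2 bytes.
    const kTag uintptr = 0x1

    func tagPointer(p unsafe.Pointer) uintptr {
        return uintptr(p) | kTag
    }

    // fromOpaque recovers the original pointer when the tag bit is set.
    // (Round-tripping through uintptr like this is fine for a demo while
    // the object is kept alive, but is not general-purpose safe Go.)
    func fromOpaque(v uintptr) (unsafe.Pointer, bool) {
        if v&kTag != 0 {
            return unsafe.Pointer(v &^ kTag), true
        }
        return nil, false
    }

    func main() {
        x := new(int64) // 8-byte aligned, so bit 0 is free
        p, ok := fromOpaque(tagPointer(unsafe.Pointer(x)))
        fmt.Println(ok, p == unsafe.Pointer(x)) // true true
    }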
- src/internal/abi/abi.go
    //
    // This method is a helper for dealing with the endianness of different CPU
    // architectures, since sub-word-sized arguments in big endian architectures
    // need to be "aligned" to the upper edge of the register to be interpreted
    // by the CPU correctly.
    func (r *RegArgs) IntRegArgAddr(reg int, argSize uintptr) unsafe.Pointer {
        if argSize > goarch.PtrSize || argSize == 0 || argSize&(argSize-1) != 0 {
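Concretely: on a big-endian machine a 1-byte argument occupies the high-address end of its 8-byte register slot, so its address must be offset from the slot's base. A sketch of that offset rule (assumed logic, not the verbatim helper):

    package main

    import "fmt"

    const ptrSize = 8 // assuming a 64-bit architecture

    // regArgOffset returns where a sub-word argument's bytes start inside
    // an 8-byte register slot. Little endian keeps the value at offset 0;
    // big endian "aligns" it to the upper edge of the register.
    func regArgOffset(argSize uintptr, bigEndian bool) uintptr {
        if bigEndian {
            return ptrSize - argSize
        }
        return 0
    }

    func main() {
        fmt.Println(regArgOffset(1, false)) // 0: byte sits at the slot's start
        fmt.Println(regArgOffset(1, true))  // 7: byte sits at the slot's end
    }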
- src/internal/runtime/atomic/atomic_mips64x.s
    TEXT ·Or8(SB), NOSPLIT, $0-9
        MOVV    ptr+0(FP), R1
        MOVBU   val+8(FP), R2
        // Align ptr down to 4 bytes so we can use 32-bit load/store.
        MOVV    $~3, R3
        AND     R1, R3
        // Compute val shift.
    #ifdef GOARCH_mips64
        // Big endian. ptr = ptr ^ 3
        XOR     $3, R1
    #endif
        // R4 = ((ptr & 3) * 8)
        AND     $3, R1, R4
        SLLV    $3, R4
        // Shift val for aligned ptr. R2 = val << R4
        SLLV    R4, R2
        SYNC
        LL      (R3), R4
        OR      R2, R4
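The routine emulates a byte-wide atomic OR with a 4-byte LL/SC pair: round the pointer down to a word boundary, then shift the operand into the right byte lane. That shift math, sketched in Go (the atomicity itself comes only from the LL/SC loop above):

    package main

    import "fmt"

    // or8Word computes the aligned word address and the shifted operand the
    // assembly builds before its LL/SC loop: the pointer is rounded down to
    // a 4-byte boundary and the byte value is shifted into its lane.
    func or8Word(ptr uintptr, val uint8, bigEndian bool) (word uintptr, shifted uint32) {
        word = ptr &^ 3 // align ptr down to 4 bytes
        lane := ptr & 3 // which byte within the word
        if bigEndian {
            lane ^= 3 // byte lanes are reversed on big endian (ptr = ptr ^ 3)
        }
        shifted = uint32(val) << (lane * 8)
        return
    }

    func main() {
        word, v := or8Word(0x1002, 0xff, false)
        fmt.Printf("word=%#x shifted=%#x\n", word, v) // word=0x1000 shifted=0xff0000
    }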
- src/runtime/tagptr_64bit.go
        // get to really high addresses and panic if it does.
        addrBits = 48

        // In addition to the 16 bits taken from the top, we can take 3 from the
        // bottom, because node must be pointer-aligned, giving a total of 19 bits
        // of count.
        tagBits = 64 - addrBits + 3

        // On AIX, 64-bit addresses are split into 36-bit segment number and 28-bit
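With 48 significant address bits and 8-byte-aligned nodes, 16 high bits plus 3 low bits are free, so a 19-bit count can ride inside a single 64-bit word. A simplified pack/unpack sketch of that layout (assumed for illustration):

    package main

    import "fmt"

    const (
        addrBits = 48
        tagBits  = 64 - addrBits + 3 // 19 bits of tag
    )

    // pack shifts the 48 significant address bits to the top of the word;
    // the 3 low bits it sheds are zero by pointer alignment, leaving 19
    // bits for the tag at the bottom.
    func pack(addr, tag uint64) uint64 {
        return addr<<(64-addrBits) | tag&(1<<tagBits-1)
    }

    func unpackAddr(v uint64) uint64 { return v >> tagBits << 3 } // the 3 restored bits were zero by alignment
    func unpackTag(v uint64) uint64  { return v & (1<<tagBits - 1) }

    func main() {
        addr := uint64(0x7f0012345678) // a 48-bit, 8-byte-aligned address
        v := pack(addr, 0x5ffff)
        fmt.Printf("%#x %#x\n", unpackAddr(v), unpackTag(v)) // 0x7f0012345678 0x5ffff
    }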
- pkg/kubelet/cm/cpumanager/policy_options.go
        // Flag to evenly distribute CPUs across NUMA nodes in cases where more
        // than one NUMA node is required to satisfy the allocation.
        DistributeCPUsAcrossNUMA bool
        // Flag to ensure CPUs are considered aligned at socket boundary rather than
        // NUMA boundary
        AlignBySocket bool
    }

    // NewStaticPolicyOptions creates a StaticPolicyOptions struct from the user configuration.
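For illustration, here is how the two flags might be set together; only the field names come from the snippet, the rest is a hypothetical reconstruction:

    package main

    import "fmt"

    // StaticPolicyOptions mirrors just the two flags in the snippet; the
    // real struct in policy_options.go carries more fields.
    type StaticPolicyOptions struct {
        DistributeCPUsAcrossNUMA bool
        AlignBySocket            bool
    }

    func main() {
        // Ask for socket-boundary alignment and for allocations to be spread
        // evenly when they have to span NUMA nodes.
        opts := StaticPolicyOptions{
            DistributeCPUsAcrossNUMA: true,
            AlignBySocket:            true,
        }
        fmt.Printf("%+v\n", opts)
    }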
- src/math/rand/example_test.go
        // Typically a non-fixed seed should be used, such as time.Now().UnixNano().
        // Using a fixed seed will produce the same output on every run.
        r := rand.New(rand.NewSource(99))

        // The tabwriter here helps us generate aligned output.
        w := tabwriter.NewWriter(os.Stdout, 1, 1, 1, ' ', 0)
        defer w.Flush()
        show := func(name string, v1, v2, v3 any) {
            fmt.Fprintf(w, "%s\t%v\t%v\t%v\n", name, v1, v2, v3)
        }
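Since the snippet is nearly self-contained already, here is a compact runnable variant showing the tabwriter pattern; the sample values are made up:

    package main

    import (
        "fmt"
        "os"
        "text/tabwriter"
    )

    func main() {
        // Cells are separated by '\t' on input; tabwriter pads each column
        // to a common width when flushed, producing aligned output.
        w := tabwriter.NewWriter(os.Stdout, 1, 1, 1, ' ', 0)
        defer w.Flush()

        show := func(name string, v1, v2, v3 any) {
            fmt.Fprintf(w, "%s\t%v\t%v\t%v\n", name, v1, v2, v3)
        }
        show("Float64", 0.2730, 0.5588, 0.9246)
        show("Int31", 1501292890, 1486668269, 182626151)
    }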
- src/runtime/pinner.go
    func (s *mspan) refreshPinnerBits() {
        p := s.getPinnerBits()
        if p == nil {
            return
        }

        hasPins := false
        bytes := alignUp(s.pinnerBitSize(), 8)

        // Iterate over each 8-byte chunk and check for pins. Note that
        // newPinnerBits guarantees that pinnerBits will be 8-byte aligned, so we
        // don't have to worry about edge cases, irrelevant bits will simply be
        // zero.
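Because the bitmap is 8-byte aligned and padded, it can be scanned a uint64 at a time with no tail special-casing. A standalone sketch of that chunked scan over a plain slice (the runtime walks raw span memory instead):

    package main

    import "fmt"

    // anyPins reports whether any bit is set, scanning the bitmap one
    // uint64 chunk at a time. Because the real pinner bitmap is 8-byte
    // aligned and padded, its trailing "irrelevant" bits are simply zero
    // and need no special casing.
    func anyPins(bitmap []uint64) bool {
        for _, chunk := range bitmap {
            if chunk != 0 {
                return true
            }
        }
        return false
    }

    func main() {
        b := make([]uint64, 4)
        fmt.Println(anyPins(b)) // false
        b[2] |= 1 << 13         // pin one object
        fmt.Println(anyPins(b)) // true
    }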
- src/cmd/internal/objfile/elf.go
            if p.Type == elf.PT_LOAD && p.Flags&elf.PF_X != 0 {
                // The memory mapping that contains the segment
                // starts at an aligned address. Apparently this
                // is what pprof expects, as it uses this and the
                // start address of the mapping to compute PC
                // delta.
                return p.Vaddr - p.Vaddr%p.Align, nil
            }
        }
        return 0, fmt.Errorf("unknown load address")
    }

    func (f *elfFile) dwarf() (*dwarf.Data, error) {
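The expression p.Vaddr - p.Vaddr%p.Align is plain align-down: it rounds the segment's virtual address down to the mapping boundary pprof expects. A tiny demonstration of the arithmetic:

    package main

    import "fmt"

    // alignDown rounds addr down to the nearest multiple of align. The
    // subtract-the-remainder form works for any nonzero alignment, which
    // is what the ELF program header supplies.
    func alignDown(addr, align uint64) uint64 {
        return addr - addr%align
    }

    func main() {
        // An executable segment at 0x401000 inside a mapping aligned to 0x200000.
        fmt.Printf("%#x\n", alignDown(0x401000, 0x200000)) // 0x400000
    }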
- src/runtime/stubs.go
    //
    // *ptr is uninitialized memory (e.g., memory that's being reused
    // for a new allocation) and hence contains only "junk".
    //
    // memclrNoHeapPointers ensures that if ptr is pointer-aligned, and n
    // is a multiple of the pointer size, then any pointer-aligned,
    // pointer-sized portion is cleared atomically. Despite the function
    // name, this is necessary because this function is the underlying
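The guarantee hinges on two preconditions: the pointer and the length must both be pointer-size multiples. A small sketch of that check (illustrative; memclrNoHeapPointers itself is runtime-internal assembly):

    package main

    import (
        "fmt"
        "unsafe"
    )

    const ptrSize = unsafe.Sizeof(uintptr(0))

    // clearedAtomically reports whether a clear of [ptr, ptr+n) meets the
    // documented guarantee: with an aligned start and a length that is a
    // multiple of the pointer size, every pointer-sized word is cleared
    // atomically rather than byte by byte.
    func clearedAtomically(ptr, n uintptr) bool {
        return ptr%ptrSize == 0 && n%ptrSize == 0
    }

    func main() {
        fmt.Println(clearedAtomically(0x1000, 64)) // true
        fmt.Println(clearedAtomically(0x1001, 64)) // false: misaligned start
    }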