Results 61 - 70 of 451 for aligned (0.13 sec)
src/internal/runtime/atomic/atomic_loong64.s
RET

// void Or8(byte volatile*, byte);
TEXT ·Or8(SB), NOSPLIT, $0-9
    MOVV    ptr+0(FP), R4
    MOVBU   val+8(FP), R5
    // Align ptr down to 4 bytes so we can use 32-bit load/store.
    MOVV    $~3, R6
    AND     R4, R6
    // R7 = ((ptr & 3) * 8)
    AND     $3, R4, R7
    SLLV    $3, R7
    // Shift val for aligned ptr. R5 = val << R4
    SLLV    R7, R5
    DBAR
    LL      (R6), R7
    OR      R5, R7
    SC      R7, (R6)
    BEQ     R7, -4(PC)
    DBAR
    RET
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 6.3K bytes - Viewed (0)
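The idea in that snippet translates directly to portable code: round the byte's address down to the containing 32-bit word, shift the value into position, then retry the read-modify-write until it succeeds. A minimal Go sketch of the same technique, assuming little-endian byte order (as on loong64) and using a CAS loop in place of LL/SC; or8 and the backing layout are illustrative, not from the Go runtime:

package main

import (
    "fmt"
    "sync/atomic"
    "unsafe"
)

// or8 ORs val into the byte at p by operating on the containing,
// 4-byte-aligned 32-bit word, mirroring the LL/SC loop above.
func or8(p *uint8, val uint8) {
    addr := uintptr(unsafe.Pointer(p))
    // Align ptr down to 4 bytes so a 32-bit atomic can be used.
    word := (*uint32)(unsafe.Pointer(addr &^ 3))
    // Shift val for the aligned ptr: ((ptr & 3) * 8) bits on little endian.
    mask := uint32(val) << ((addr & 3) * 8)
    for {
        old := atomic.LoadUint32(word)
        if atomic.CompareAndSwapUint32(word, old, old|mask) {
            return
        }
    }
}

func main() {
    var words [2]uint32 // 4-byte-aligned backing store for the demo
    bytes := (*[8]uint8)(unsafe.Pointer(&words[0]))
    or8(&bytes[5], 0x81)
    fmt.Printf("%#v\n", *bytes) // bytes[5] == 0x81
}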
src/runtime/memmove_arm64.s
// check is negligible since it is only required for large copies.
//
// Large copies use a software pipelined loop processing 64 bytes per iteration.
// The destination pointer is 16-byte aligned to minimize unaligned accesses.
// The loop tail is handled by always copying 64 bytes from the end.

// func memmove(to, from unsafe.Pointer, n uintptr)
TEXT runtime·memmove<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-24
    CBZ     R2, copy0
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 18 18:26:13 UTC 2022 - 6K bytes - Viewed (0)
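The tail trick in that comment is easy to demonstrate in plain Go: copy whole 64-byte chunks from the front, then unconditionally copy the last 64 bytes from the end, re-copying a few bytes rather than running a byte-sized remainder loop. A simplified sketch for non-overlapping buffers of at least 64 bytes (the real memmove also handles overlap and destination alignment); copyLarge is a hypothetical name:

package main

import "fmt"

const chunk = 64

// copyLarge copies src into dst (equal lengths >= chunk, non-overlapping).
func copyLarge(dst, src []byte) {
    n := len(src)
    // Main loop: whole 64-byte chunks from the front.
    for i := 0; i+chunk < n; i += chunk {
        copy(dst[i:i+chunk], src[i:i+chunk])
    }
    // Loop tail: always copy the final 64 bytes from the end.
    // Overlapping the main loop's last chunk is harmless.
    copy(dst[n-chunk:], src[n-chunk:])
}

func main() {
    src := make([]byte, 100)
    for i := range src {
        src[i] = byte(i)
    }
    dst := make([]byte, len(src))
    copyLarge(dst, src)
    fmt.Println(dst[99] == 99) // true
}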
src/runtime/defs_linux_arm64.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Aug 22 19:05:10 UTC 2023 - 3.6K bytes - Viewed (0)
src/runtime/sys_windows_arm.s
// using the initial stack allocated by the OS.
// It calls back into standard C using the BL below.
// To do that, the stack pointer must be 8-byte-aligned.
TEXT runtime·_initcgo(SB),NOSPLIT|NOFRAME,$0
    MOVM.DB.W [R4, R14], (R13)  // push {r4, lr}

    // Ensure stack is 8-byte aligned before calling C code
    MOVW    R13, R4
    BIC     $0x7, R13

    // Allocate a TLS slot to hold g across calls to external code
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Sep 21 15:56:43 UTC 2023 - 7.7K bytes - Viewed (0)
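The BIC $0x7, R13 in that snippet is the classic round-down-to-alignment bit trick: clearing the low three bits forces the stack pointer onto an 8-byte boundary (the original value is saved in R4 first so it can be restored). A one-line Go equivalent, with alignDown as a hypothetical name:

package main

import "fmt"

// alignDown rounds p down to a multiple of align (a power of two).
// Go's &^ (AND NOT) clears the low bits, just as ARM's BIC does.
func alignDown(p, align uintptr) uintptr {
    return p &^ (align - 1)
}

func main() {
    fmt.Printf("%#x\n", alignDown(0x7ffdca35, 8)) // 0x7ffdca30
}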
src/syscall/route_bsd.go
    minRoutingSockaddrLen = rsaAlignOf(0)
)

// Round the length of a raw sockaddr up to align it properly.
func rsaAlignOf(salen int) int {
    salign := sizeofPtr
    if darwin64Bit {
        // Darwin kernels require 32-bit aligned access to
        // routing facilities.
        salign = 4
    } else if netbsd32Bit {
        // NetBSD 6 and beyond kernels require 64-bit aligned
        // access to routing facilities.
        salign = 8
    } else if runtime.GOOS == "freebsd" {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Feb 26 21:03:59 UTC 2024 - 9.1K bytes - Viewed (0)
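The snippet is truncated before the function's return, but the rounding it describes is the standard round-up-to-alignment formula: add salign-1, then clear the low bits. A standalone Go sketch (roundUp is a hypothetical name; salign must be a power of two):

package main

import "fmt"

// roundUp rounds salen up to the next multiple of salign.
func roundUp(salen, salign int) int {
    return (salen + salign - 1) &^ (salign - 1)
}

func main() {
    fmt.Println(roundUp(30, 8)) // 32: a 30-byte sockaddr occupies 32 bytes
    fmt.Println(roundUp(30, 4)) // 32
    fmt.Println(roundUp(28, 4)) // 28: already aligned
}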
platforms/software/dependency-management/src/main/java/org/gradle/api/internal/artifacts/ivyservice/resolveengine/graph/builder/PotentialEdge.java
// We need to check if the target version exists. For this, we have to try to get metadata for the aligned version.
// If it's there, it means we can align, otherwise, we must NOT add the edge, or resolution would fail
ComponentGraphResolveState metadata = version.getResolveStateOrNull();
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Tue Oct 10 21:10:11 UTC 2023 - 4.3K bytes - Viewed (0)
src/runtime/malloc.go
        p = alignUp(p, align)
        p2 := sysReserve(unsafe.Pointer(p), size)
        if p != uintptr(p2) {
            // Must have raced. Try again.
            sysFreeOS(p2, size)
            if retries++; retries == 100 {
                throw("failed to allocate aligned heap memory; too many retries")
            }
            goto retry
        }
        // Success.
        return p2, size
    default:
        // Trim off the unaligned parts.
        pAligned := alignUp(p, align)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0)
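alignUp here is the round-up counterpart of the BIC trick earlier, and the "trim off the unaligned parts" branch relies on it: over-reserve, then keep only the aligned window. A small sketch of the arithmetic, assuming the (n + a - 1) &^ (a - 1) definition the runtime uses for its helper; the addresses below are made up for illustration:

package main

import "fmt"

// alignUp rounds n up to a multiple of a (a power of two).
func alignUp(n, a uintptr) uintptr {
    return (n + a - 1) &^ (a - 1)
}

func main() {
    // Hypothetical raw reservation start and required alignment.
    p := uintptr(0x7f3a12345)
    align := uintptr(0x10000)
    pAligned := alignUp(p, align)
    fmt.Printf("p=%#x pAligned=%#x trimmed=%#x bytes\n", p, pAligned, pAligned-p)
}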
tensorflow/compiler/jit/xla_tensor.cc
}

// The pointer tag, OR-ed into the XlaTensor's address to distinguish it from
// device-side tensors, which are either CPU or GPU memory pointers. This works
// because we're guaranteed that CPU and GPU pointers are aligned to > 1 bits.
namespace {
constexpr uintptr_t kTag = 0x1ULL;
}

/*static*/ XlaTensor* XlaTensor::FromOpaquePointer(void* ptr) {
  uintptr_t value = reinterpret_cast<uintptr_t>(ptr);
  if (value & kTag) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 4.5K bytes - Viewed (0)
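Because sufficiently aligned pointers always have zero low bits, bit 0 is free to carry a type flag, which is exactly what kTag exploits. A Go sketch of the same tagging scheme, assuming 8-byte-aligned allocations; the names and the uintptr round-trip are for illustration only (converting a uintptr back to a pointer is not GC-safe in production Go):

package main

import (
    "fmt"
    "unsafe"
)

const tag uintptr = 0x1 // fits in the always-zero low bit of an aligned pointer

type tensor struct{ id int } // 8-byte aligned on 64-bit platforms

// toOpaque ORs the tag into the pointer's address.
func toOpaque(t *tensor) uintptr {
    return uintptr(unsafe.Pointer(t)) | tag
}

// fromOpaque recovers the pointer if the tag is present, else nil.
func fromOpaque(v uintptr) *tensor {
    if v&tag == 0 {
        return nil // an untagged (device-side) pointer
    }
    return (*tensor)(unsafe.Pointer(v &^ tag))
}

func main() {
    t := &tensor{id: 42}
    fmt.Println(fromOpaque(toOpaque(t)).id) // 42
}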
src/internal/abi/abi.go
//
// This method is a helper for dealing with the endianness of different CPU
// architectures, since sub-word-sized arguments in big endian architectures
// need to be "aligned" to the upper edge of the register to be interpreted
// by the CPU correctly.
func (r *RegArgs) IntRegArgAddr(reg int, argSize uintptr) unsafe.Pointer {
    if argSize > goarch.PtrSize || argSize == 0 || argSize&(argSize-1) != 0 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jan 23 15:51:32 UTC 2023 - 3.1K bytes - Viewed (0)
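On a big-endian machine a 1-, 2-, or 4-byte value occupies the high end of its 8-byte register slot, so the argument's address within the saved register needs an offset of PtrSize - argSize; on little endian the offset is zero. A sketch of just that offset computation (regArgOffset, ptrSize, and the bigEndian flag are stand-ins for the goarch constants, not the runtime's API):

package main

import "fmt"

const ptrSize = 8 // stand-in for goarch.PtrSize on a 64-bit target

// regArgOffset returns the byte offset of an argSize-byte argument
// within its register slot. argSize must be a power of two <= ptrSize.
func regArgOffset(argSize uintptr, bigEndian bool) uintptr {
    if bigEndian {
        return ptrSize - argSize // "align" to the upper edge
    }
    return 0
}

func main() {
    fmt.Println(regArgOffset(2, true))  // 6: the uint16 sits in bytes 6-7
    fmt.Println(regArgOffset(2, false)) // 0: bytes 0-1 on little endian
}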
src/internal/runtime/atomic/atomic_mips64x.s
TEXT ·Or8(SB), NOSPLIT, $0-9
    MOVV    ptr+0(FP), R1
    MOVBU   val+8(FP), R2
    // Align ptr down to 4 bytes so we can use 32-bit load/store.
    MOVV    $~3, R3
    AND     R1, R3
    // Compute val shift.
#ifdef GOARCH_mips64
    // Big endian. ptr = ptr ^ 3
    XOR     $3, R1
#endif
    // R4 = ((ptr & 3) * 8)
    AND     $3, R1, R4
    SLLV    $3, R4
    // Shift val for aligned ptr. R2 = val << R4
    SLLV    R4, R2
    SYNC
    LL      (R3), R4
    OR      R2, R4
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 11 21:29:34 UTC 2024 - 7.2K bytes - Viewed (0)
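This is the same word-at-a-time OR as the loong64 version above, plus one extra step: the mips64 variant is big endian, so the byte's position within the 32-bit word is mirrored, which the XOR $3, R1 accomplishes before the shift is computed. A Go sketch of just the shift computation for both byte orders (byteShift is a hypothetical name):

package main

import "fmt"

// byteShift returns how far a byte's value must be shifted to land in
// its slot within the containing 4-byte word.
func byteShift(addr uintptr, bigEndian bool) uint {
    if bigEndian {
        addr ^= 3 // mirror the byte index: 0<->3, 1<->2
    }
    return uint((addr & 3) * 8)
}

func main() {
    for a := uintptr(0); a < 4; a++ {
        fmt.Printf("byte %d: LE shift %2d, BE shift %2d\n",
            a, byteShift(a, false), byteShift(a, true))
    }
}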