Results 81 - 90 of 510 for aligned (0.11 sec)
- src/runtime/traceregion.go
    off atomic.Uintptr
}

const traceRegionAllocBlockData = 64<<10 - unsafe.Sizeof(traceRegionAllocBlockHeader{})

// alloc allocates n-byte block. The block is always aligned to 8 bytes,
// regardless of platform.
func (a *traceRegionAlloc) alloc(n uintptr) *notInHeap {
    n = alignUp(n, 8)
    if n > traceRegionAllocBlockData {
        throw("traceRegion: alloc too large")
    }
    if a.dropping.Load() {
        throw("traceRegion: alloc with concurrent drop")
    }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:47:01 UTC 2024 - 3.2K bytes - Viewed (0)
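The alignUp call above is the standard power-of-two round-up. A minimal Go sketch of just that arithmetic, with alignUp as a stand-in for the runtime's unexported helper:

package main

import "fmt"

// alignUp rounds n up to the next multiple of align,
// which must be a power of two.
func alignUp(n, align uintptr) uintptr {
    return (n + align - 1) &^ (align - 1)
}

func main() {
    for _, n := range []uintptr{1, 7, 8, 9} {
        fmt.Println(n, "->", alignUp(n, 8)) // 1 -> 8, 7 -> 8, 8 -> 8, 9 -> 16
    }
}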
- src/internal/runtime/atomic/atomic_arm.s
    DMB MB_ISHST
    STREXD R2, (R1), R0 // stores R2 and R3
    CMP $0, R0
    BNE store64loop
    DMB MB_ISH
    RET

// The following functions all panic if their address argument isn't
// 8-byte aligned. Since we're calling back into Go code to do this,
// we have to cooperate with stack unwinding. In the normal case, the
// functions tail-call into the appropriate implementation, which
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 5.7K bytes - Viewed (0)
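A hedged Go sketch of the guard that comment describes (checkAligned64 is a hypothetical name; the real check and panic live in the runtime's assembly and Go helpers):

package main

import (
    "fmt"
    "unsafe"
)

// checkAligned64 panics if p is not 8-byte aligned, mirroring the
// requirement for 64-bit atomics on 32-bit ARM.
func checkAligned64(p *uint64) {
    if uintptr(unsafe.Pointer(p))&7 != 0 {
        panic("unaligned 64-bit atomic operation")
    }
}

func main() {
    x := new(uint64) // 8-byte aligned on 64-bit hosts; 32-bit callers must arrange this
    checkAligned64(x)
    fmt.Println("aligned")
}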
- tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache.cc
        // The cache is effectively disabled, so we pass the read through to the
        // fetcher without breaking it up into blocks.
        return block_fetcher_(filename, offset, n, buffer, status);
    }
    // Calculate the block-aligned start and end of the read.
    size_t start = block_size_ * (offset / block_size_);
    size_t finish = block_size_ * ((offset + n) / block_size_);
    if (finish < offset + n) {
        finish += block_size_;
    }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jul 16 01:39:09 UTC 2020 - 11.1K bytes - Viewed (0)
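The start/finish computation widens the requested byte range outward to block boundaries. A transliteration of just that arithmetic into Go, with illustrative names:

package main

import "fmt"

// blockAlignedRange widens [offset, offset+n) to block boundaries.
func blockAlignedRange(offset, n, blockSize uint64) (start, finish uint64) {
    start = blockSize * (offset / blockSize)        // round the start down
    finish = blockSize * ((offset + n) / blockSize) // round the end down,
    if finish < offset+n {
        finish += blockSize // then up if a partial block remains
    }
    return
}

func main() {
    fmt.Println(blockAlignedRange(300, 500, 256)) // 256 1024
}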
- src/internal/bytealg/equal_mips64x.s
    MOVV $1, R1
    MOVB R1, ret+24(FP)
    RET
byte_test:
    MOVBU (R1), R6
    ADDV $1, R1
    MOVBU (R2), R7
    ADDV $1, R2
    BEQ R6, R7, byte_loop
    JMP not_eq
chunk_entry:
    // make sure both a and b are aligned
    OR R1, R2, R9
    AND $0x7, R9
    BNE R0, R9, byte_loop
    JMP chunk_loop_1
chunk_loop:
    // chunk size is 16
    SGTU $16, R3, R8
    BNE R0, R8, chunk_tail_8
chunk_loop_1:
    MOVV (R1), R6
    MOVV (R2), R7
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat Nov 06 10:24:44 UTC 2021 - 2K bytes - Viewed (0)
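chunk_entry uses a classic trick: OR the two addresses together so a single mask-and-branch tests both alignments at once. A Go rendering of the idea:

package main

import (
    "fmt"
    "unsafe"
)

// bothAligned8 reports whether a and b are both 8-byte aligned; if either
// address has a low bit set, the OR of the two has it set as well.
func bothAligned8(a, b unsafe.Pointer) bool {
    return (uintptr(a)|uintptr(b))&7 == 0
}

func main() {
    buf := make([]byte, 16) // base is at least 8-byte aligned from the allocator
    fmt.Println(bothAligned8(unsafe.Pointer(&buf[0]), unsafe.Pointer(&buf[8]))) // true
    fmt.Println(bothAligned8(unsafe.Pointer(&buf[0]), unsafe.Pointer(&buf[3]))) // false
}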
- src/runtime/memmove_arm.s
_back:
    ADD N, FROM            /* from end pointer */
    CMP $4, N              /* need at least 4 bytes to copy */
    BLT _b1tail
_b4align:                  /* align destination on 4 */
    AND.S $3, TE, TMP
    BEQ _b4aligned
    MOVBU.W -1(FROM), TMP  /* pre-indexed */
    MOVBU.W TMP, -1(TE)    /* pre-indexed */
    B _b4align
_b4aligned:                /* is source now aligned? */
    AND.S $3, FROM, TMP
    BNE _bunaligned
    ADD $31, TS, TMP       /* do 32-byte chunks if possible */
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Jun 04 07:25:06 UTC 2020 - 5.9K bytes - Viewed (0)
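A sketch of the _b4align idea for equal-length, non-overlapping buffers: copy single bytes from the end until the end offset is 4-aligned, then switch to wider moves. Offsets stand in for addresses here, so an aligned base is assumed:

package main

import "fmt"

func copyBackward(dst, src []byte) {
    te := len(dst)
    for te > 0 && te&3 != 0 { // align the destination end on 4
        te--
        dst[te] = src[te]
    }
    for te >= 4 { // then move 4-byte chunks
        te -= 4
        copy(dst[te:te+4], src[te:te+4])
    }
    copy(dst[:te], src[:te]) // leftover head bytes
}

func main() {
    src := []byte("hello, aligned!")
    dst := make([]byte, len(src))
    copyBackward(dst, src)
    fmt.Println(string(dst))
}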
- src/internal/runtime/atomic/atomic_loong64.s
    RET

// void Or8(byte volatile*, byte);
TEXT ·Or8(SB), NOSPLIT, $0-9
    MOVV ptr+0(FP), R4
    MOVBU val+8(FP), R5
    // Align ptr down to 4 bytes so we can use 32-bit load/store.
    MOVV $~3, R6
    AND R4, R6
    // R7 = ((ptr & 3) * 8)
    AND $3, R4, R7
    SLLV $3, R7
    // Shift val for aligned ptr. R5 = val << R4
    SLLV R7, R5
    DBAR
    LL (R6), R7
    OR R5, R7
    SC R7, (R6)
    BEQ R7, -4(PC)
    DBAR
    RET
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 6.3K bytes - Viewed (0)
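A Go sketch of the Or8 technique, with a CAS loop standing in for the LL/SC pair: align the address down to its containing 32-bit word, shift the byte value into position, and retry the read-modify-write until it sticks. The (addr & 3) * 8 shift assumes little-endian byte order, as the assembly does:

package main

import (
    "fmt"
    "sync/atomic"
    "unsafe"
)

func or8(addr *uint8, val uint8) {
    a := uintptr(unsafe.Pointer(addr))
    word := (*uint32)(unsafe.Pointer(a &^ 3)) // containing aligned word
    shift := (a & 3) * 8                      // bit offset of the target byte
    for {
        old := atomic.LoadUint32(word)
        if atomic.CompareAndSwapUint32(word, old, old|uint32(val)<<shift) {
            return
        }
    }
}

func main() {
    var word uint32 // aligned 4-byte backing keeps the word access in bounds
    bytes := (*[4]uint8)(unsafe.Pointer(&word))
    or8(&bytes[2], 0x81)
    fmt.Println(bytes) // &[0 0 129 0] on little-endian
}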
- src/runtime/memmove_arm64.s
// check is negligible since it is only required for large copies.
//
// Large copies use a software pipelined loop processing 64 bytes per iteration.
// The destination pointer is 16-byte aligned to minimize unaligned accesses.
// The loop tail is handled by always copying 64 bytes from the end.
//
// func memmove(to, from unsafe.Pointer, n uintptr)
TEXT runtime·memmove<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-24
    CBZ R2, copy0
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 18 18:26:13 UTC 2022 - 6K bytes - Viewed (0)
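A sketch of the tail strategy those comments describe: run the main loop over full 64-byte chunks, then unconditionally copy the final 64 bytes from the end. The tail may overlap the last chunk, trading a few redundant stores for the removal of all remainder branching; non-overlapping buffers are assumed:

package main

import "fmt"

func copyLarge(dst, src []byte) {
    n := len(src)
    if n < 64 { // the real code dispatches small sizes separately
        copy(dst, src)
        return
    }
    for i := 0; i+64 <= n; i += 64 {
        copy(dst[i:i+64], src[i:i+64])
    }
    copy(dst[n-64:], src[n-64:]) // fixed-size tail; overlap with the last chunk is harmless
}

func main() {
    src := make([]byte, 200)
    for i := range src {
        src[i] = byte(i)
    }
    dst := make([]byte, 200)
    copyLarge(dst, src)
    fmt.Println(dst[0], dst[100], dst[199]) // 0 100 199
}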
- src/runtime/defs_linux_arm64.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Aug 22 19:05:10 UTC 2023 - 3.6K bytes - Viewed (0)
- src/runtime/sys_windows_arm.s
// using the initial stack allocated by the OS.
// It calls back into standard C using the BL below.
// To do that, the stack pointer must be 8-byte-aligned.
TEXT runtime·_initcgo(SB),NOSPLIT|NOFRAME,$0
    MOVM.DB.W [R4, R14], (R13)  // push {r4, lr}

    // Ensure stack is 8-byte aligned before calling C code
    MOVW R13, R4
    BIC $0x7, R13

    // Allocate a TLS slot to hold g across calls to external code
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Sep 21 15:56:43 UTC 2023 - 7.7K bytes - Viewed (0)
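MOVW R13, R4 preserves the original stack pointer and BIC $0x7, R13 clears its low three bits. In Go terms that is an align-down; a tiny sketch:

package main

import "fmt"

func main() {
    sp := uintptr(0x7f3c) // example stack pointer value
    saved := sp           // analogue of MOVW R13, R4
    sp &^= 7              // analogue of BIC $0x7, R13
    fmt.Printf("%#x -> %#x\n", saved, sp) // 0x7f3c -> 0x7f38
}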
- src/syscall/route_bsd.go
    minRoutingSockaddrLen = rsaAlignOf(0)
)

// Round the length of a raw sockaddr up to align it properly.
func rsaAlignOf(salen int) int {
    salign := sizeofPtr
    if darwin64Bit {
        // Darwin kernels require 32-bit aligned access to
        // routing facilities.
        salign = 4
    } else if netbsd32Bit {
        // NetBSD 6 and beyond kernels require 64-bit aligned
        // access to routing facilities.
        salign = 8
    } else if runtime.GOOS == "freebsd" {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Feb 26 21:03:59 UTC 2024 - 9.1K bytes - Viewed (0)
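A sketch of rsaAlignOf's rounding step, with salign passed as a parameter; the real function derives salign per-platform as shown above, and returns salign outright for a zero-length sockaddr, which is why minRoutingSockaddrLen is rsaAlignOf(0):

package main

import "fmt"

func rsaAlign(salen, salign int) int {
    if salen == 0 {
        return salign // a zero-length sockaddr still occupies one alignment unit
    }
    return (salen + salign - 1) &^ (salign - 1)
}

func main() {
    fmt.Println(rsaAlign(0, 4), rsaAlign(5, 4), rsaAlign(16, 4)) // 4 8 16
}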