Results 81 - 90 of 286 for aligned (0.13 sec)
src/internal/goarch/goarch.go
    // the compiler word, the link editor word, and the TOC save word.
    const MinFrameSize = _MinFrameSize

    // StackAlign is the required alignment of the SP register.
    // The stack must be at least word aligned, but some architectures require more.
Last Modified: Tue May 17 19:48:21 UTC 2022 - 2.1K bytes
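
StackAlign-style constraints are enforced by rounding the stack pointer down to a power-of-two boundary. A minimal Go sketch of that rounding, using an illustrative constant rather than the real per-architecture StackAlign value:

    package main

    import "fmt"

    const stackAlign = 16 // illustrative; the real StackAlign differs per architecture

    // alignDown rounds addr down to a multiple of align (a power of two),
    // clearing the low bits with AND NOT.
    func alignDown(addr, align uintptr) uintptr {
        return addr &^ (align - 1)
    }

    func main() {
        fmt.Printf("%#x\n", alignDown(0x7fffcafe, stackAlign)) // 0x7fffcaf0
    }
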
src/runtime/memclr_mips64x.s
        VMOVB   W0, -48(R4)
        VMOVB   W0, -32(R4)
        VMOVB   W0, -16(R4)
        JMP     done
    no_msa:
        // if less than 8 bytes, do one byte at a time
        SGTU    $8, R2, R3
        BNE     R3, out
        // do one byte at a time until 8-aligned
        AND     $7, R1, R3
        BEQ     R3, words
        MOVB    R0, (R1)
        ADDV    $1, R1
        JMP     -4(PC)
    words:
        // do 8 bytes at a time if there is room
        ADDV    $-7, R4, R2
        SGTU    R2, R1, R3
        BEQ     R3, out
        MOVV    R0, (R1)
Last Modified: Sat Nov 06 10:24:44 UTC 2021 - 1.7K bytes
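
The snippet's strategy, storing single bytes until the destination pointer is 8-aligned and then clearing a word at a time, can be sketched in Go roughly as follows (an illustration of the approach, not the runtime's actual memclr):

    package main

    import (
        "fmt"
        "unsafe"
    )

    func memclr(b []byte) {
        i := 0
        // One byte at a time until the address is 8-aligned.
        for i < len(b) && uintptr(unsafe.Pointer(&b[i]))&7 != 0 {
            b[i] = 0
            i++
        }
        // Eight bytes at a time while there is room.
        for ; i+8 <= len(b); i += 8 {
            *(*uint64)(unsafe.Pointer(&b[i])) = 0
        }
        // Remaining tail bytes.
        for ; i < len(b); i++ {
            b[i] = 0
        }
    }

    func main() {
        b := []byte("hello, alignment")
        memclr(b)
        fmt.Println(b) // all zeros
    }
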
src/vendor/golang.org/x/net/route/sys_darwin.go
    ifmam2 := &wireFormat{extOff: sizeofIfmaMsghdr2Darwin15, bodyOff: sizeofIfmaMsghdr2Darwin15}
    ifmam2.parse = ifmam2.parseInterfaceMulticastAddrMessage
    // Darwin kernels require 32-bit aligned access to routing facilities.
    return 4, map[int]*wireFormat{
        syscall.RTM_ADD:    rtm,
        syscall.RTM_DELETE: rtm,
        syscall.RTM_CHANGE: rtm,
        syscall.RTM_GET:    rtm,
        syscall.RTM_LOSING: rtm,
Last Modified: Fri Aug 05 19:54:32 UTC 2022 - 2.8K bytes
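
The returned 4 is the alignment unit applied when walking the variable-length records inside a routing message; record lengths are rounded up to that boundary. A small sketch of the rounding (roundUp is a hypothetical helper, not part of x/net/route):

    package main

    import "fmt"

    // roundUp rounds l up to the next multiple of align (a power of two).
    func roundUp(l, align int) int {
        return (l + align - 1) &^ (align - 1)
    }

    func main() {
        fmt.Println(roundUp(5, 4), roundUp(8, 4)) // 8 8
    }
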
src/internal/runtime/atomic/atomic_arm.s
        DMB     MB_ISHST
        STREXD  R2, (R1), R0  // stores R2 and R3
        CMP     $0, R0
        BNE     store64loop
        DMB     MB_ISH
        RET

    // The following functions all panic if their address argument isn't
    // 8-byte aligned. Since we're calling back into Go code to do this,
    // we have to cooperate with stack unwinding. In the normal case, the
    // functions tail-call into the appropriate implementation, which
Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 5.7K bytes
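
The check the comment describes amounts to testing the low three bits of the address before attempting a 64-bit atomic operation. A Go sketch of that test (an illustration; the runtime's actual panic path cooperates with stack unwinding as described above):

    package main

    import (
        "fmt"
        "unsafe"
    )

    func checkAligned64(p *uint64) {
        if uintptr(unsafe.Pointer(p))&7 != 0 {
            panic("unaligned 64-bit atomic operation")
        }
    }

    func main() {
        var x uint64
        checkAligned64(&x) // fine on 64-bit platforms; a 32-bit stack may only 4-align x
        fmt.Println("aligned")
    }
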
src/internal/runtime/atomic/types.go
    // two's-complement way.
    //
    //go:nosplit
    func (i *Int32) Add(delta int32) int32 {
        return Xaddint32(&i.value, delta)
    }

    // Int64 is an atomically accessed int64 value.
    //
    // 8-byte aligned on all platforms, unlike a regular int64.
    //
    // An Int64 must not be copied.
    type Int64 struct {
        noCopy noCopy
        _      align64
        value  int64
    }

    // Load accesses and returns the value atomically.
    //
Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 14.2K bytes
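
The align64 field above is what forces 8-byte alignment even when an Int64 is embedded in a larger struct on a 32-bit target. User code gets the same guarantee from the typed values in sync/atomic, for example:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type counters struct {
        hits atomic.Int64 // safe to use atomically even inside a struct on 32-bit targets
    }

    func main() {
        var c counters
        c.hits.Add(3)
        fmt.Println(c.hits.Load()) // 3
    }
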
src/internal/bytealg/equal_mips64x.s
        MOVV    $1, R1
        MOVB    R1, ret+24(FP)
        RET
    byte_test:
        MOVBU   (R1), R6
        ADDV    $1, R1
        MOVBU   (R2), R7
        ADDV    $1, R2
        BEQ     R6, R7, byte_loop
        JMP     not_eq
    chunk_entry:
        // make sure both a and b are aligned
        OR      R1, R2, R9
        AND     $0x7, R9
        BNE     R0, R9, byte_loop
        JMP     chunk_loop_1
    chunk_loop:
        // chunk size is 16
        SGTU    $16, R3, R8
        BNE     R0, R8, chunk_tail_8
    chunk_loop_1:
        MOVV    (R1), R6
        MOVV    (R2), R7
Last Modified: Sat Nov 06 10:24:44 UTC 2021 - 2K bytes
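
The OR/AND pair at chunk_entry is a common trick: OR-ing the two addresses lets a single test of the low three bits decide whether both operands are 8-aligned. The same test in Go (illustrative only, not the real bytealg code):

    package main

    import (
        "fmt"
        "unsafe"
    )

    func bothAligned8(a, b *byte) bool {
        return (uintptr(unsafe.Pointer(a))|uintptr(unsafe.Pointer(b)))&7 == 0
    }

    func main() {
        x := make([]byte, 16) // a 16-byte allocation is at least 8-aligned in practice
        fmt.Println(bothAligned8(&x[0], &x[8])) // true
        fmt.Println(bothAligned8(&x[0], &x[3])) // false
    }
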
tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache.cc
        // The cache is effectively disabled, so we pass the read through to the
        // fetcher without breaking it up into blocks.
        return block_fetcher_(filename, offset, n, buffer, status);
    }
    // Calculate the block-aligned start and end of the read.
    size_t start = block_size_ * (offset / block_size_);
    size_t finish = block_size_ * ((offset + n) / block_size_);
    if (finish < offset + n) {
        finish += block_size_;
    }
Last Modified: Thu Jul 16 01:39:09 UTC 2020 - 11.1K bytes
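
The range computation rounds the read start down and the read end up to block boundaries. The same arithmetic as a small Go sketch, with the C++ member names replaced by plain parameters:

    package main

    import "fmt"

    // blockRange returns the block-aligned [start, finish) covering [offset, offset+n).
    func blockRange(offset, n, blockSize uint64) (start, finish uint64) {
        start = blockSize * (offset / blockSize)
        finish = blockSize * ((offset + n) / blockSize)
        if finish < offset+n {
            finish += blockSize
        }
        return start, finish
    }

    func main() {
        fmt.Println(blockRange(100, 50, 64)) // 64 192
    }
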
src/runtime/memmove_arm.s
    _b4align:                           /* align destination on 4 */
        AND.S   $3, TE, TMP
        BEQ     _b4aligned
        MOVBU.W -1(FROM), TMP           /* pre-indexed */
        MOVBU.W TMP, -1(TE)             /* pre-indexed */
        B       _b4align
    _b4aligned:                         /* is source now aligned? */
        AND.S   $3, FROM, TMP
        BNE     _bunaligned
        ADD     $31, TS, TMP            /* do 32-byte chunks if possible */
        MOVW    TS, savedts-4(SP)
    _b32loop:
        CMP     TMP, TE
        BLS     _b4tail
        MOVM.DB.W (FROM), [R0-R7]
Last Modified: Thu Jun 04 07:25:06 UTC 2020 - 5.9K bytes
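
Since source and destination advance together through the byte-copy prologue, aligning the destination by some head count also advances the source by the same amount; only then is it meaningful to ask whether the source ended up 4-aligned too. A sketch of that decision (copyStrategy is a hypothetical helper):

    package main

    import "fmt"

    // copyStrategy mirrors the branch structure above: 4-align dst first,
    // then take the word loop only if src is then 4-aligned as well.
    func copyStrategy(dst, src uintptr) string {
        head := (4 - dst&3) & 3 // bytes copied to 4-align the destination
        if (src+head)&3 != 0 {
            return "unaligned path"
        }
        return fmt.Sprintf("aligned path after %d head byte(s)", head)
    }

    func main() {
        fmt.Println(copyStrategy(0x1003, 0x2007)) // aligned path after 1 head byte(s)
        fmt.Println(copyStrategy(0x1003, 0x2006)) // unaligned path
    }
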
src/internal/runtime/atomic/atomic_loong64.s
        MOVV    ptr+0(FP), R4
        MOVBU   val+8(FP), R5
        // Align ptr down to 4 bytes so we can use 32-bit load/store.
        MOVV    $~3, R6
        AND     R4, R6
        // R7 = ((ptr & 3) * 8)
        AND     $3, R4, R7
        SLLV    $3, R7
        // Shift val for aligned ptr. R5 = val << R7
        SLLV    R7, R5
        DBAR
        LL      (R6), R7
        OR      R5, R7
        SC      R7, (R6)
        BEQ     R7, -4(PC)
        DBAR
        RET

    // void And8(byte volatile*, byte);
    TEXT ·And8(SB), NOSPLIT, $0-9
Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 6.3K bytes
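
The pointer and shift arithmetic above lets a byte-wide atomic OR be built from a 32-bit LL/SC pair: align the pointer down to a word, then shift the byte value into its position within that word. The arithmetic alone in Go (LL/SC has no Go equivalent; assumes little-endian byte numbering, as on loong64; or8Plan is a hypothetical helper):

    package main

    import "fmt"

    // or8Plan computes the aligned word address and the shifted operand
    // that a 32-bit LL/SC loop would use for a byte-wide OR at ptr.
    func or8Plan(ptr uintptr, val byte) (word uintptr, shifted uint32) {
        word = ptr &^ 3                // MOVV $~3, R6; AND R4, R6
        shift := (ptr & 3) * 8         // AND $3, R4, R7; SLLV $3, R7
        shifted = uint32(val) << shift // SLLV R7, R5
        return word, shifted
    }

    func main() {
        word, shifted := or8Plan(0x1002, 0x80)
        fmt.Printf("%#x %#x\n", word, shifted) // 0x1000 0x800000
    }
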
src/runtime/memmove_arm64.s
    // check is negligible since it is only required for large copies.
    //
    // Large copies use a software pipelined loop processing 64 bytes per iteration.
    // The destination pointer is 16-byte aligned to minimize unaligned accesses.
    // The loop tail is handled by always copying 64 bytes from the end.

    // func memmove(to, from unsafe.Pointer, n uintptr)
    TEXT runtime·memmove<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-24
Last Modified: Fri Mar 18 18:26:13 UTC 2022 - 6K bytes
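
The "always copy 64 bytes from the end" tail trick replaces a variable-length tail loop with one fixed-size copy that ends exactly at the buffer's end, harmlessly re-copying a few bytes that were already written. A forward-only Go sketch of the idea (a real memmove must also handle overlapping buffers; assumes len(src) >= chunk):

    package main

    import "fmt"

    func copyChunks(dst, src []byte, chunk int) {
        n := len(src)
        // Whole chunks from the front.
        for i := 0; i+chunk <= n; i += chunk {
            copy(dst[i:i+chunk], src[i:i+chunk])
        }
        // Tail: one fixed-size copy ending exactly at n, overlapping the
        // last full chunk when n is not a multiple of chunk.
        if n%chunk != 0 && n >= chunk {
            copy(dst[n-chunk:], src[n-chunk:])
        }
    }

    func main() {
        src := []byte("0123456789abcdef0123")
        dst := make([]byte, len(src))
        copyChunks(dst, src, 16)
        fmt.Println(string(dst)) // 0123456789abcdef0123
    }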