Results 11 - 20 of 57 for aligned (0.32 sec)
src/internal/bytealg/compare_loong64.s
entry:
	ADDV	R4, R14, R12 // R6 start of a, R14 end of a
	BEQ	R4, R12, samebytes // length is 0
	SRLV	$4, R14 // R14 is number of chunks
	BEQ	R0, R14, byte_loop

	// make sure both a and b are aligned.
	OR	R4, R6, R15
	AND	$7, R15
	BNE	R0, R15, byte_loop

	PCALIGN	$16
chunk16_loop:
	BEQ	R0, R14, byte_loop
	MOVV	(R4), R8
	MOVV	(R6), R9
	BNE	R8, R9, byte_loop
	MOVV	8(R4), R16
	MOVV	8(R6), R17
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 13 15:04:25 UTC 2024 - 1.7K bytes - Viewed (0)
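This is the word-at-a-time fast path of byte-slice comparison on loong64: once both pointers pass the alignment check, it compares 16 bytes per iteration and drops to a byte loop at the first mismatch. A rough portable sketch of the same idea (compareChunked is a hypothetical helper, not the runtime's code):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// compareChunked mirrors the idea in compare_loong64.s: compare 8-byte
// words while a full word remains, then fall back to a byte-at-a-time
// loop to locate the exact differing byte.
func compareChunked(a, b []byte) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	i := 0
	for ; i+8 <= n; i += 8 {
		// Equality of whole words is endian-independent.
		if binary.LittleEndian.Uint64(a[i:]) != binary.LittleEndian.Uint64(b[i:]) {
			break // the difference is somewhere in this word
		}
	}
	for ; i < n; i++ {
		if a[i] != b[i] {
			if a[i] < b[i] {
				return -1
			}
			return 1
		}
	}
	// samebytes: the shorter slice sorts first.
	switch {
	case len(a) < len(b):
		return -1
	case len(a) > len(b):
		return 1
	}
	return 0
}

func main() {
	fmt.Println(compareChunked([]byte("aligned access"), []byte("aligned accesz"))) // -1
	fmt.Println(bytes.Compare([]byte("aligned access"), []byte("aligned accesz")))  // same result
}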
tensorflow/c/tf_tensor.h
    TF_DataType, const int64_t* dims, int num_dims, void* data, size_t len,
    void (*deallocator)(void* data, size_t len, void* arg),
    void* deallocator_arg);

// Returns the alignment, in bytes, required for allocating aligned tensors.
//
// This can be used in combination with TF_NewTensor to manually manage
// memory while ensuring the resulting tensors satisfy TensorFlow's
// memory alignment preferences.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 06 16:40:30 UTC 2024 - 6.3K bytes - Viewed (0)
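The comment describes the intended pattern: query TensorFlow's required alignment, allocate the buffer yourself at that alignment, and hand it to TF_NewTensor with a matching deallocator. The alignment trick itself is generic; a minimal Go sketch of over-allocating and slicing at an aligned offset (alignedBuffer is a hypothetical helper, not part of any TensorFlow API):

package main

import (
	"fmt"
	"unsafe"
)

// alignedBuffer returns an n-byte slice whose backing array starts at an
// address that is a multiple of align (align must be a power of two).
// It over-allocates by align-1 bytes and slices at the first aligned offset.
func alignedBuffer(n, align int) []byte {
	buf := make([]byte, n+align-1)
	addr := uintptr(unsafe.Pointer(&buf[0]))
	off := 0
	if rem := int(addr % uintptr(align)); rem != 0 {
		off = align - rem
	}
	return buf[off : off+n]
}

func main() {
	b := alignedBuffer(128, 64)
	fmt.Printf("addr %% 64 = %d\n", uintptr(unsafe.Pointer(&b[0]))%64) // 0
}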
src/internal/runtime/atomic/atomic_mipsx.s
TEXT ·Or8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVBU	val+4(FP), R2
	MOVW	$~3, R3	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	AND	R1, R3
#ifdef GOARCH_mips
	// Big endian. ptr = ptr ^ 3
	XOR	$3, R1
#endif
	AND	$3, R1, R4	// R4 = ((ptr & 3) * 8)
	SLL	$3, R4
	SLL	R4, R2, R2	// Shift val for aligned ptr. R2 = val << R4
	SYNC
try_or8:
	LL	(R3), R4	// R4 = *R3
	OR	R2, R4
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 11 21:29:34 UTC 2024 - 4.9K bytes - Viewed (0)
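The routine emulates a byte-wide atomic OR with 32-bit primitives: round the address down to a 4-byte boundary, shift the value into the correct byte lane, then retry with LL/SC until the store sticks. A sketch of the same technique in Go using a CAS loop (or8 is hypothetical; it assumes little-endian, whereas the assembly also handles big-endian by XOR-ing the low address bits):

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// or8 performs an atomic OR on a single byte using only 32-bit atomics:
// round the address down to a 4-byte boundary, shift the value into the
// right byte lane, and CAS until it sticks.
func or8(addr *uint8, val uint8) {
	p := uintptr(unsafe.Pointer(addr))
	word := (*uint32)(unsafe.Pointer(p &^ 3)) // aligned 32-bit cell containing the byte
	shift := (p & 3) * 8                      // byte lane within the word (little-endian)
	mask := uint32(val) << shift
	for {
		old := atomic.LoadUint32(word)
		if atomic.CompareAndSwapUint32(word, old, old|mask) {
			return
		}
	}
}

func main() {
	var word uint32 // a uint32 is always 4-byte aligned
	cell := (*[4]uint8)(unsafe.Pointer(&word))
	or8(&cell[1], 0x81)
	fmt.Printf("word = %#x\n", word) // 0x8100 on little-endian
}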
src/internal/trace/gc_test.go
	for i, u := range util {
		if u.Time+int64(window) > util[len(util)-1].Time {
			break
		}
		mmu = math.Min(mmu, muInWindow(util[i:], u.Time+int64(window)))
	}
}

// Consider all left-aligned windows.
update()

// Reverse the trace. Slightly subtle because each MutatorUtil
// is a *change*.
rutil := make([]trace.MutatorUtil, len(util))
if util[len(util)-1].Util != 0 {
	panic("irreversible trace")
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 5.3K bytes - Viewed (0)
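The test brute-forces minimum mutator utilization (MMU): slide a fixed window across the trace, take the mean utilization of each left-aligned window, and keep the worst. The real code works on change events (trace.MutatorUtil); a simplified sketch over uniformly sampled utilization values:

package main

import "fmt"

// mmuSampled computes the minimum mean utilization over every
// left-aligned window of w consecutive samples. Prefix sums make each
// window mean O(1), so the whole scan is O(n).
func mmuSampled(util []float64, w int) float64 {
	if w <= 0 || w > len(util) {
		return 1
	}
	sum := make([]float64, len(util)+1)
	for i, u := range util {
		sum[i+1] = sum[i] + u
	}
	mmu := 1.0
	for i := 0; i+w <= len(util); i++ {
		if mu := (sum[i+w] - sum[i]) / float64(w); mu < mmu {
			mmu = mu
		}
	}
	return mmu
}

func main() {
	util := []float64{1, 1, 0.2, 0.4, 1, 1}
	fmt.Println(mmuSampled(util, 2)) // 0.3: the worst 2-sample window
}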
internal/bpool/bpool.go
// available in the pool.
func (bp *BytePoolCap) Get() (b []byte) {
	if bp == nil {
		return nil
	}
	select {
	case b = <-bp.c:
		// reuse existing buffer
	default:
		// create new aligned buffer
		if bp.wcap > 0 {
			b = reedsolomon.AllocAligned(1, bp.wcap)[0][:bp.w]
		} else {
			b = reedsolomon.AllocAligned(1, bp.w)[0]
		}
	}
	return
}

// Put returns the given Buffer to the BytePool.
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Fri Apr 19 16:44:59 UTC 2024 - 2.6K bytes - Viewed (0)
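Get is non-blocking: the select with a default branch drains the buffered channel when a buffer is available and otherwise allocates a fresh one. A stripped-down pool built on the same pattern (without the reedsolomon.AllocAligned aligned allocation):

package main

import "fmt"

// BytePool is a minimal, non-blocking buffer pool built on a buffered
// channel, mirroring the Get/Put shape of bpool.BytePoolCap.
type BytePool struct {
	c chan []byte
	w int // width of each buffer
}

func NewBytePool(maxSize, width int) *BytePool {
	return &BytePool{c: make(chan []byte, maxSize), w: width}
}

func (bp *BytePool) Get() []byte {
	select {
	case b := <-bp.c:
		return b // reuse an existing buffer
	default:
		return make([]byte, bp.w) // pool empty: allocate
	}
}

func (bp *BytePool) Put(b []byte) {
	select {
	case bp.c <- b: // return to the pool
	default: // pool full: let the GC reclaim it
	}
}

func main() {
	bp := NewBytePool(4, 1024)
	b := bp.Get()
	fmt.Println(len(b)) // 1024
	bp.Put(b)
}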
src/runtime/traceregion.go
	off atomic.Uintptr
}

const traceRegionAllocBlockData = 64<<10 - unsafe.Sizeof(traceRegionAllocBlockHeader{})

// alloc allocates n-byte block. The block is always aligned to 8 bytes, regardless of platform.
func (a *traceRegionAlloc) alloc(n uintptr) *notInHeap {
	n = alignUp(n, 8)
	if n > traceRegionAllocBlockData {
		throw("traceRegion: alloc too large")
	}
	if a.dropping.Load() {
		throw("traceRegion: alloc with concurrent drop")
	}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:47:01 UTC 2024 - 3.2K bytes - Viewed (0)
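alignUp rounds the request up to the next multiple of 8 with the standard power-of-two mask trick. A standalone sketch of what that helper does (a reimplementation for illustration, not the runtime's source):

package main

import "fmt"

// alignUp rounds n up to a multiple of a, where a must be a power of
// two: add a-1, then clear the low bits with the inverted mask.
func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

func main() {
	for _, n := range []uintptr{1, 7, 8, 9} {
		fmt.Println(n, "->", alignUp(n, 8)) // 8, 8, 8, 16
	}
}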
src/internal/runtime/atomic/atomic_arm.s
	DMB	MB_ISHST
	STREXD	R2, (R1), R0	// stores R2 and R3
	CMP	$0, R0
	BNE	store64loop
	DMB	MB_ISH
	RET

// The following functions all panic if their address argument isn't
// 8-byte aligned. Since we're calling back into Go code to do this,
// we have to cooperate with stack unwinding. In the normal case, the
// functions tail-call into the appropriate implementation, which
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 5.7K bytes - Viewed (0)
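The comment concerns 64-bit atomics on 32-bit ARM, which require 8-byte-aligned operands and panic otherwise. The guard itself is a single mask test; sketched in Go (checkAligned8 is hypothetical):

package main

import (
	"fmt"
	"unsafe"
)

// checkAligned8 panics when addr is not 8-byte aligned, mirroring the
// behavior the assembly comment describes for 64-bit atomics on ARM.
func checkAligned8(addr *uint64) {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		panic("unaligned 64-bit atomic operation")
	}
}

func main() {
	x := new(uint64) // heap-allocated 64-bit values are 8-byte aligned in Go
	checkAligned8(x)
	fmt.Println("aligned ok")
}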
src/internal/runtime/atomic/atomic_loong64.s
	RET

// void	Or8(byte volatile*, byte);
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R4
	MOVBU	val+8(FP), R5
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R6
	AND	R4, R6
	// R7 = ((ptr & 3) * 8)
	AND	$3, R4, R7
	SLLV	$3, R7
	// Shift val for aligned ptr. R5 = val << R4
	SLLV	R7, R5
	DBAR
	LL	(R6), R7
	OR	R5, R7
	SC	R7, (R6)
	BEQ	R7, -4(PC)
	DBAR
	RET
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 6.3K bytes - Viewed (0)
src/syscall/route_bsd.go
	minRoutingSockaddrLen = rsaAlignOf(0)
)

// Round the length of a raw sockaddr up to align it properly.
func rsaAlignOf(salen int) int {
	salign := sizeofPtr
	if darwin64Bit {
		// Darwin kernels require 32-bit aligned access to
		// routing facilities.
		salign = 4
	} else if netbsd32Bit {
		// NetBSD 6 and beyond kernels require 64-bit aligned
		// access to routing facilities.
		salign = 8
	} else if runtime.GOOS == "freebsd" {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Feb 26 21:03:59 UTC 2024 - 9.1K bytes - Viewed (0)
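rsaAlignOf picks a kernel-specific alignment and rounds the sockaddr length up to it; judging by minRoutingSockaddrLen = rsaAlignOf(0) above, a zero length still occupies one alignment unit. The rounding, extracted into a sketch (rsaAlign is hypothetical and omits the per-OS cases):

package main

import "fmt"

// rsaAlign rounds a raw sockaddr length up to a multiple of salign
// (a power of two), treating a zero length as one full alignment slot.
func rsaAlign(salen, salign int) int {
	if salen == 0 {
		return salign
	}
	return (salen + salign - 1) &^ (salign - 1)
}

func main() {
	fmt.Println(rsaAlign(0, 8))  // 8
	fmt.Println(rsaAlign(5, 8))  // 8
	fmt.Println(rsaAlign(16, 8)) // 16
}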
tensorflow/compiler/jit/xla_tensor.cc
}

// The pointer tag, OR-ed into the XlaTensor's address to distinguish it from
// device-side tensors, which are either CPU or GPU memory pointers. This works
// because we're guaranteed that CPU and GPU pointers are aligned to > 1 bits.
namespace {
constexpr uintptr_t kTag = 0x1ULL;
}

/*static*/ XlaTensor* XlaTensor::FromOpaquePointer(void* ptr) {
  uintptr_t value = reinterpret_cast<uintptr_t>(ptr);
  if (value & kTag) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 4.5K bytes - Viewed (0)
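XlaTensor borrows the pointer's lowest bit as a type tag, which is safe only because aligned allocations always leave that bit clear. The same pattern in a Go sketch (tagPointer/fromOpaque are hypothetical helpers; round-tripping pointers through uintptr is for illustration only and would violate Go's unsafe rules in real code):

package main

import (
	"fmt"
	"unsafe"
)

const tag uintptr = 0x1 // low bit, always zero in an aligned address

// tagPointer sets the low bit to mark the pointer as "ours"; safe only
// when the pointee's alignment is at least 2 bytes.
func tagPointer(p unsafe.Pointer) uintptr { return uintptr(p) | tag }

// fromOpaque mirrors XlaTensor::FromOpaquePointer: if the tag bit is
// set, strip it and recover the pointer; otherwise it is a plain address.
func fromOpaque(v uintptr) (unsafe.Pointer, bool) {
	if v&tag != 0 {
		return unsafe.Pointer(v &^ tag), true
	}
	return unsafe.Pointer(v), false
}

func main() {
	x := new(uint64)
	opaque := tagPointer(unsafe.Pointer(x))
	p, tagged := fromOpaque(opaque)
	fmt.Println(tagged, p == unsafe.Pointer(x)) // true true
}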