Results 1 - 10 of 27 for aligned (0.14 sec)

  1. maven-compat/src/test/java/org/apache/maven/project/path/DefaultPathTranslatorTest.java

            String aligned = new DefaultPathTranslator().alignToBaseDirectory("${basedir}", basedir);
    
            assertEquals(basedir.getAbsolutePath(), aligned);
        }
    
        @Test
        void testAlignToBasedirWhereBasedirExpressionIsTheValuePrefix() {
            File basedir = new File(System.getProperty("java.io.tmpdir"), "test").getAbsoluteFile();
    
    Registered: Wed Jun 12 09:55:16 UTC 2024
    - Last Modified: Thu Apr 25 05:46:50 UTC 2024
    - 2K bytes
    - Viewed (0)
  2. src/runtime/memmove_loong64.s

    	SUBVU	R5, R4, R7
    	AND	$7, R7
    	BNE	R7, out
    
    	// if less than 8 bytes, do byte copying
    	SGTU	$8, R6, R7
    	BNE	R7, out
    
    	// do one byte at a time until 8-aligned
    	AND	$7, R4, R8
    	BEQ	R8, words
    	MOVB	(R5), R7
    	ADDV	$1, R5
    	MOVB	R7, (R4)
    	ADDV	$1, R4
    	JMP	-6(PC)
    
    words:
    	// do 8 bytes at a time if there is room
    	ADDV	$-7, R9, R6 // R6 is end pointer-7
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 13 15:04:25 UTC 2024
    - 1.8K bytes
    - Viewed (0)
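
    This routine follows a pattern common to these memory primitives: copy single bytes until the destination reaches an 8-byte boundary, then move 8-byte words while there is room, then finish the tail. Below is a rough Go sketch of that control flow, for illustration only; the real memmove is the assembly above, which also checks (SUBVU/AND/BNE at the top of the excerpt) that src and dst share the same alignment offset before taking the word path.

    package sketch

    import "unsafe"

    // memmoveSketch mirrors the alignment strategy above: byte copies until
    // dst is 8-aligned, then 8-byte word copies, then the remaining tail.
    // It assumes len(src) >= len(dst) and that src and dst share the same
    // alignment offset, as the assembly verifies before its word loop.
    func memmoveSketch(dst, src []byte) {
    	n := len(dst)
    	i := 0
    	// "do one byte at a time until 8-aligned"
    	for i < n && uintptr(unsafe.Pointer(&dst[i]))&7 != 0 {
    		dst[i] = src[i]
    		i++
    	}
    	// "do 8 bytes at a time if there is room"
    	for ; i+8 <= n; i += 8 {
    		*(*uint64)(unsafe.Pointer(&dst[i])) = *(*uint64)(unsafe.Pointer(&src[i]))
    	}
    	// tail bytes
    	for ; i < n; i++ {
    		dst[i] = src[i]
    	}
    }
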
  3. src/runtime/memclr_loong64.s

    TEXT runtime·memclrNoHeapPointers<ABIInternal>(SB),NOSPLIT,$0-16
    	ADDV	R4, R5, R6
    
    	// if less than 8 bytes, do one byte at a time
    	SGTU	$8, R5, R8
    	BNE	R8, out
    
    	// do one byte at a time until 8-aligned
    	AND	$7, R4, R8
    	BEQ	R8, words
    	MOVB	R0, (R4)
    	ADDV	$1, R4
    	JMP	-4(PC)
    
    words:
    	// do 8 bytes at a time if there is room
    	ADDV	$-7, R6, R5
    
    	PCALIGN	$16
    	SGTU	R5, R4, R8
    	BEQ	R8, out
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 13 15:04:25 UTC 2024
    - 766 bytes
    - Viewed (0)
  4. src/runtime/checkptr.go

    	// nil pointer is always suitably aligned (#47430).
    	if p == nil {
    		return
    	}
    
    	// Check that (*[n]elem)(p) is appropriately aligned.
    	// Note that we allow unaligned pointers if the types they point to contain
    	// no pointers themselves. See issue 37298.
    	// TODO(mdempsky): What about fieldAlign?
    	if elem.Pointers() && uintptr(p)&(uintptr(elem.Align_)-1) != 0 {
    		throw("checkptr: misaligned pointer conversion")
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 3.6K bytes
    - Viewed (0)
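
    The test on the last line of the excerpt is the standard power-of-two alignment check: for an alignment that is a power of two, an address is a multiple of it exactly when its low bits are zero. A standalone sketch of the same mask trick (the names here are ours, not the runtime's):

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    // isAligned reports whether p is aligned to align bytes, where align is
    // a power of two -- the same uintptr(p)&(align-1) test as in checkptr.go.
    func isAligned(p unsafe.Pointer, align uintptr) bool {
    	return uintptr(p)&(align-1) == 0
    }

    func main() {
    	var x uint64
    	fmt.Println(isAligned(unsafe.Pointer(&x), 8)) // true: a uint64 is 8-aligned
    }
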
  5. src/internal/bytealg/compare_loong64.s

    entry:
    	ADDV	R4, R14, R12	// R6 start of a, R14 end of a
    	BEQ	R4, R12, samebytes // length is 0
    
    	SRLV	$4, R14		// R14 is number of chunks
    	BEQ	R0, R14, byte_loop
    
    	// make sure both a and b are aligned.
    	OR	R4, R6, R15
    	AND	$7, R15
    	BNE	R0, R15, byte_loop
    
    	PCALIGN	$16
    chunk16_loop:
    	BEQ	R0, R14, byte_loop
    	MOVV	(R4), R8
    	MOVV	(R6), R9
    	BNE	R8, R9, byte_loop
    	MOVV	8(R4), R16
    	MOVV	8(R6), R17
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 13 15:04:25 UTC 2024
    - 1.7K bytes
    - Viewed (0)
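
    The OR R4, R6, R15 / AND $7, R15 pair tests both operands at once: the OR of two addresses has a low bit set if either address does, so a single mask decides whether both strings are 8-aligned and the 16-byte chunk loop is safe; otherwise the code branches to byte_loop. The same idea in Go (a sketch, not the bytealg implementation):

    package sketch

    import "unsafe"

    // bothAligned8 reports whether both pointers are 8-byte aligned. ORing
    // the two addresses first lets one mask check them together, exactly as
    // the assembly does before entering chunk16_loop.
    func bothAligned8(a, b unsafe.Pointer) bool {
    	return (uintptr(a)|uintptr(b))&7 == 0
    }
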
  6. src/internal/runtime/atomic/atomic_mipsx.s

    TEXT ·Or8(SB),NOSPLIT,$0-5
    	MOVW	ptr+0(FP), R1
    	MOVBU	val+4(FP), R2
    	MOVW	$~3, R3	// Align ptr down to 4 bytes so we can use 32-bit load/store.
    	AND	R1, R3
    #ifdef GOARCH_mips
    	// Big endian.  ptr = ptr ^ 3
    	XOR	$3, R1
    #endif
    	AND	$3, R1, R4	// R4 = ((ptr & 3) * 8)
    	SLL	$3, R4
    	SLL	R4, R2, R2	// Shift val for aligned ptr. R2 = val << R4
    	SYNC
    try_or8:
    	LL	(R3), R4	// R4 = *R3
    	OR	R2, R4
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sat May 11 21:29:34 UTC 2024
    - 4.9K bytes
    - Viewed (0)
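
    MIPS has no byte-wide load-linked/store-conditional, so Or8 widens the operation: it rounds the pointer down to a 4-byte boundary, works out which byte lane of that word the target occupies (XORing the index with 3 on big-endian targets, per the GOARCH_mips branch), shifts val into that lane, and retries a 32-bit LL/OR/SC until it sticks. A hedged Go sketch of the same address arithmetic, using a compare-and-swap loop in place of LL/SC (illustration only, not the runtime code):

    package sketch

    import (
    	"sync/atomic"
    	"unsafe"
    )

    // or8ViaWord ORs val into the byte at p by operating on the containing
    // 32-bit word, mirroring the shifts in the assembly above. bigEndian
    // stands in for the GOARCH_mips #ifdef.
    func or8ViaWord(p *uint8, val uint8, bigEndian bool) {
    	addr := uintptr(unsafe.Pointer(p))
    	word := (*uint32)(unsafe.Pointer(addr &^ 3)) // align ptr down to 4 bytes
    	lane := addr & 3
    	if bigEndian {
    		lane ^= 3 // big endian: ptr = ptr ^ 3
    	}
    	mask := uint32(val) << (lane * 8) // shift val for the aligned ptr
    	for {
    		old := atomic.LoadUint32(word)
    		if atomic.CompareAndSwapUint32(word, old, old|mask) {
    			return
    		}
    	}
    }
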
  7. src/internal/trace/gc_test.go

    		for i, u := range util {
    			if u.Time+int64(window) > util[len(util)-1].Time {
    				break
    			}
    			mmu = math.Min(mmu, muInWindow(util[i:], u.Time+int64(window)))
    		}
    	}
    
    	// Consider all left-aligned windows.
    	update()
    	// Reverse the trace. Slightly subtle because each MutatorUtil
    	// is a *change*.
    	rutil := make([]trace.MutatorUtil, len(util))
    	if util[len(util)-1].Util != 0 {
    		panic("irreversible trace")
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 18:48:18 UTC 2024
    - 5.3K bytes
    - Viewed (0)
  8. internal/bpool/bpool.go

    // available in the pool.
    func (bp *BytePoolCap) Get() (b []byte) {
    	if bp == nil {
    		return nil
    	}
    	select {
    	case b = <-bp.c:
    		// reuse existing buffer
    	default:
    		// create new aligned buffer
    		if bp.wcap > 0 {
    			b = reedsolomon.AllocAligned(1, bp.wcap)[0][:bp.w]
    		} else {
    			b = reedsolomon.AllocAligned(1, bp.w)[0]
    		}
    	}
    	return
    }
    
    // Put returns the given Buffer to the BytePool.
    Registered: Sun Jun 16 00:44:34 UTC 2024
    - Last Modified: Fri Apr 19 16:44:59 UTC 2024
    - 2.6K bytes
    - Viewed (0)
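
    Get is the usual channel-backed pool pattern: try a non-blocking receive of a previously returned buffer, and only allocate when the pool is empty (here via reedsolomon.AllocAligned, so fresh buffers come back aligned). A generic sketch of that pattern without the reedsolomon dependency, showing both sides of the protocol; the names and types below are ours, not minio's:

    package sketch

    // bytePool is a minimal channel-backed pool in the style of Get/Put
    // above; it hands out plain slices rather than AllocAligned ones.
    type bytePool struct {
    	c chan []byte // idle buffers
    	w int         // width of each buffer
    }

    func newBytePool(size, width int) *bytePool {
    	return &bytePool{c: make(chan []byte, size), w: width}
    }

    func (bp *bytePool) Get() []byte {
    	select {
    	case b := <-bp.c:
    		return b // reuse an existing buffer
    	default:
    		return make([]byte, bp.w) // pool empty: allocate a new one
    	}
    }

    func (bp *bytePool) Put(b []byte) {
    	select {
    	case bp.c <- b: // keep it for the next Get
    	default: // pool full: drop it and let the GC reclaim it
    	}
    }
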
  9. src/runtime/traceregion.go

    	off  atomic.Uintptr
    }
    
    const traceRegionAllocBlockData = 64<<10 - unsafe.Sizeof(traceRegionAllocBlockHeader{})
    
    // alloc allocates n-byte block. The block is always aligned to 8 bytes, regardless of platform.
    func (a *traceRegionAlloc) alloc(n uintptr) *notInHeap {
    	n = alignUp(n, 8)
    	if n > traceRegionAllocBlockData {
    		throw("traceRegion: alloc too large")
    	}
    	if a.dropping.Load() {
    		throw("traceRegion: alloc with concurrent drop")
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:47:01 UTC 2024
    - 3.2K bytes
    - Viewed (0)
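
    alignUp(n, 8) rounds the request up to the next multiple of 8 so every block handed out keeps 8-byte alignment. For a power-of-two alignment this is the usual add-then-clear-low-bits identity; a standalone sketch (the runtime has its own alignUp, this one is just for illustration):

    package main

    import "fmt"

    // alignUp rounds n up to the next multiple of a, where a is a power of
    // two: add a-1, then clear the low bits.
    func alignUp(n, a uintptr) uintptr {
    	return (n + a - 1) &^ (a - 1)
    }

    func main() {
    	fmt.Println(alignUp(13, 8)) // 16
    	fmt.Println(alignUp(16, 8)) // 16 (already aligned)
    }
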
  10. src/internal/runtime/atomic/atomic_mips64x.s

    TEXT ·Or8(SB), NOSPLIT, $0-9
    	MOVV	ptr+0(FP), R1
    	MOVBU	val+8(FP), R2
    	// Align ptr down to 4 bytes so we can use 32-bit load/store.
    	MOVV	$~3, R3
    	AND	R1, R3
    	// Compute val shift.
    #ifdef GOARCH_mips64
    	// Big endian.  ptr = ptr ^ 3
    	XOR	$3, R1
    #endif
    	// R4 = ((ptr & 3) * 8)
    	AND	$3, R1, R4
    	SLLV	$3, R4
    	// Shift val for aligned ptr. R2 = val << R4
    	SLLV	R4, R2
    
    	SYNC
    	LL	(R3), R4
    	OR	R2, R4
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sat May 11 21:29:34 UTC 2024
    - 7.2K bytes
    - Viewed (0)