Results 31 - 40 of 192 for aligned2 (0.22 sec)

  1. src/internal/trace/gc_test.go

    		for i, u := range util {
    			if u.Time+int64(window) > util[len(util)-1].Time {
    				break
    			}
    			mmu = math.Min(mmu, muInWindow(util[i:], u.Time+int64(window)))
    		}
    	}
    
    	// Consider all left-aligned windows.
    	update()
    	// Reverse the trace. Slightly subtle because each MutatorUtil
    	// is a *change*.
    	rutil := make([]trace.MutatorUtil, len(util))
    	if util[len(util)-1].Util != 0 {
    		panic("irreversible trace")
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 18:48:18 UTC 2024
    - 5.3K bytes
    - Viewed (0)
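
    The loop above scans every left-aligned window: each utilization
    change point opens a window of length window, and the MMU is the
    minimum mean utilization seen across all of them. A self-contained
    sketch of that scan, with a simplified muInWindow standing in for
    the test helper of the same name (sample is an illustrative type,
    not trace.MutatorUtil):

        package main

        import (
            "fmt"
            "math"
        )

        // sample is a (time, utilization) change point, loosely
        // analogous to trace.MutatorUtil in the excerpt above.
        type sample struct {
            Time int64
            Util float64
        }

        // muInWindow is a simplified stand-in for the test helper of
        // the same name: the mean utilization over [s[0].Time, end),
        // treating each sample as a step change.
        func muInWindow(s []sample, end int64) float64 {
            var area float64
            for i := 0; i < len(s) && s[i].Time < end; i++ {
                next := end
                if i+1 < len(s) && s[i+1].Time < end {
                    next = s[i+1].Time
                }
                area += s[i].Util * float64(next-s[i].Time)
            }
            return area / float64(end-s[0].Time)
        }

        func main() {
            util := []sample{{0, 1}, {10, 0}, {20, 1}, {30, 0}}
            window := int64(15)
            mmu := 1.0
            for i, u := range util {
                // Stop once the window would extend past the trace.
                if u.Time+window > util[len(util)-1].Time {
                    break
                }
                mmu = math.Min(mmu, muInWindow(util[i:], u.Time+window))
            }
            fmt.Println("left-aligned MMU:", mmu) // 0.333...
        }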
  2. pkg/kubelet/cm/devicemanager/manager.go

    		if err != nil {
    			return nil, err
    		}
    		if allocateRemainingFrom(preferred.Intersection(aligned)) {
    			return allocated, nil
    		}
    		// Then fallback to allocate from the aligned set if no preferred list
    		// is returned (or not enough devices are returned in that list).
    		if allocateRemainingFrom(aligned) {
    			return allocated, nil
    		}
    
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Mon Apr 15 12:01:56 UTC 2024
    - 43K bytes
    - Viewed (0)
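
    The device manager snippet applies a two-tier policy: allocate
    first from the intersection of the preferred and aligned device
    sets, then fall back to the aligned set alone. A minimal sketch of
    that policy, using a toy set type rather than the Kubernetes sets
    package, and an illustrative allocateRemainingFrom rather than the
    manager's actual helper:

        package main

        import "fmt"

        // set is a toy string set standing in for the Kubernetes
        // sets type used by manager.go.
        type set map[string]bool

        func (s set) intersection(o set) set {
            out := set{}
            for k := range s {
                if o[k] {
                    out[k] = true
                }
            }
            return out
        }

        func main() {
            aligned := set{"dev0": true, "dev1": true, "dev2": true}
            preferred := set{"dev1": true, "dev9": true}
            need := 2
            allocated := set{}

            // allocateRemainingFrom claims devices from pool until
            // the request is satisfied, mirroring the snippet's helper.
            allocateRemainingFrom := func(pool set) bool {
                for d := range pool {
                    if len(allocated) == need {
                        break
                    }
                    allocated[d] = true
                }
                return len(allocated) == need
            }

            // First take devices that are both preferred and aligned,
            // then fall back to the aligned set, as manager.go does.
            if !allocateRemainingFrom(preferred.intersection(aligned)) {
                allocateRemainingFrom(aligned)
            }
            fmt.Println(allocated) // dev1 plus one other aligned device
        }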
  3. platforms/software/dependency-management/src/integTest/groovy/org/gradle/integtests/resolve/alignment/AlignmentIntegrationTest.groovy

                            if (it.getId().getGroup().startsWith("nebula")) {
                                it.belongsTo("aligned-group:nebula:\${it.getId().getVersion()}")
                            }
                            if (it.getId().getGroup().startsWith("proto")) {
                                it.belongsTo("aligned-group:proto:\${it.getId().getVersion()}")
                            }
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Tue Oct 10 21:10:11 UTC 2023
    - 59.6K bytes
    - Viewed (0)
  4. internal/bpool/bpool.go

    // available in the pool.
    func (bp *BytePoolCap) Get() (b []byte) {
    	if bp == nil {
    		return nil
    	}
    	select {
    	case b = <-bp.c:
    		// reuse existing buffer
    	default:
    		// create new aligned buffer
    		if bp.wcap > 0 {
    			b = reedsolomon.AllocAligned(1, bp.wcap)[0][:bp.w]
    		} else {
    			b = reedsolomon.AllocAligned(1, bp.w)[0]
    		}
    	}
    	return
    }
    
    // Put returns the given Buffer to the BytePool.
    Registered: Sun Jun 16 00:44:34 UTC 2024
    - Last Modified: Fri Apr 19 16:44:59 UTC 2024
    - 2.6K bytes
    - Viewed (0)
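
    Get does a non-blocking receive on the pool's channel and only
    allocates when no buffer is waiting; Put is the mirror image. A
    stripped-down sketch of that pattern, with plain make in place of
    reedsolomon.AllocAligned, since the aligned allocation is the only
    library-specific detail:

        package main

        import "fmt"

        // pool is a minimal sketch of the channel-based byte pool
        // above: Get reuses a buffer if one is waiting in the channel
        // and allocates a fresh one otherwise; Put returns buffers
        // without ever blocking.
        type pool struct {
            c chan []byte
            w int // width of buffers handed out by Get
        }

        func newPool(size, width int) *pool {
            return &pool{c: make(chan []byte, size), w: width}
        }

        func (p *pool) Get() (b []byte) {
            select {
            case b = <-p.c:
                // reuse an existing buffer
            default:
                // pool empty: allocate a new one (the original calls
                // reedsolomon.AllocAligned here for aligned memory)
                b = make([]byte, p.w)
            }
            return
        }

        func (p *pool) Put(b []byte) {
            select {
            case p.c <- b:
                // buffer returned to the pool
            default:
                // pool full: drop the buffer for the GC to collect
            }
        }

        func main() {
            p := newPool(4, 64)
            b := p.Get() // pool empty: freshly allocated
            p.Put(b)
            b2 := p.Get()                // reused from the pool
            fmt.Println(&b[0] == &b2[0]) // true: same backing array
        }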
  5. src/internal/runtime/atomic/atomic_arm.s

    	DMB	MB_ISHST
    
    	STREXD	R2, (R1), R0	// stores R2 and R3
    	CMP	$0, R0
    	BNE	store64loop
    
    	DMB	MB_ISH
    	RET
    
    // The following functions all panic if their address argument isn't
    // 8-byte aligned. Since we're calling back into Go code to do this,
    // we have to cooperate with stack unwinding. In the normal case, the
    // functions tail-call into the appropriate implementation, which
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 5.7K bytes
    - Viewed (0)
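
    The comment refers to an alignment guard that precedes each 64-bit
    operation. A sketch of what such a check amounts to, assuming a
    hypothetical panicIfUnaligned helper (the runtime's actual check
    and error path differ):

        package main

        import (
            "fmt"
            "unsafe"
        )

        // panicIfUnaligned rejects targets that are not 8-byte
        // aligned, the precondition the assembly above enforces for
        // 64-bit atomics. Illustrative only, not the runtime helper.
        func panicIfUnaligned(p *uint64) {
            if uintptr(unsafe.Pointer(p))&7 != 0 {
                panic("unaligned 64-bit atomic operation")
            }
        }

        func main() {
            buf := make([]byte, 24)
            base := uintptr(unsafe.Pointer(&buf[0]))
            i := (8-base%8)%8 + 4 // index whose address is 4 mod 8
            p := (*uint64)(unsafe.Pointer(&buf[i]))

            defer func() { fmt.Println("recovered:", recover()) }()
            panicIfUnaligned(p) // fires: only 4-byte aligned
        }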
  6. src/internal/runtime/atomic/types.go

    // two's-complement way.
    //
    //go:nosplit
    func (i *Int32) Add(delta int32) int32 {
    	return Xaddint32(&i.value, delta)
    }
    
    // Int64 is an atomically accessed int64 value.
    //
    // 8-byte aligned on all platforms, unlike a regular int64.
    //
    // An Int64 must not be copied.
    type Int64 struct {
    	noCopy noCopy
    	_      align64
    	value  int64
    }
    
    // Load accesses and returns the value atomically.
    //
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 14.2K bytes
    - Viewed (0)
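
    The align64 marker is needed because a plain int64 field is only
    guaranteed 4-byte alignment on 32-bit platforms, while 64-bit
    atomics require 8. User code gets the same guarantee from the
    public sync/atomic.Int64, as in this small sketch:

        package main

        import (
            "fmt"
            "sync/atomic"
        )

        // counter relies on sync/atomic.Int64, which is 8-byte
        // aligned on all platforms, so Add and Load are safe even on
        // 32-bit targets where a bare int64 field may be misaligned.
        type counter struct {
            hits atomic.Int64
        }

        func main() {
            var c counter
            c.hits.Add(3)
            fmt.Println(c.hits.Load()) // 3
        }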
  7. src/internal/runtime/atomic/atomic_loong64.s

    	MOVV	ptr+0(FP), R4
    	MOVBU	val+8(FP), R5
    	// Align ptr down to 4 bytes so we can use 32-bit load/store.
    	MOVV	$~3, R6
    	AND	R4, R6
    	// R7 = ((ptr & 3) * 8)
    	AND	$3, R4, R7
    	SLLV	$3, R7
    	// Shift val for aligned ptr. R5 = val << R7
    	SLLV	R7, R5
    
    	DBAR
    	LL	(R6), R7
    	OR	R5, R7
    	SC	R7, (R6)
    	BEQ	R7, -4(PC)
    	DBAR
    	RET
    
    // void	And8(byte volatile*, byte);
    TEXT ·And8(SB), NOSPLIT, $0-9
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 6.3K bytes
    - Viewed (0)
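
    The routine emulates a byte-wide atomic OR with a 32-bit LL/SC
    loop: round the pointer down to a word boundary, compute the
    byte's bit offset within that word, and shift the value into
    position. The same address arithmetic in Go for readability, with
    a made-up address:

        package main

        import "fmt"

        func main() {
            ptr := uintptr(0x1002) // hypothetical target byte address
            val := byte(0x80)

            aligned := ptr &^ 3 // round down to a 4-byte boundary
            shift := (ptr & 3) * 8 // bit offset of the byte in the word
            word := uint32(val) << shift // positioned for a 32-bit OR

            fmt.Printf("aligned=%#x shift=%d word=%#x\n", aligned, shift, word)
            // aligned=0x1000 shift=16 word=0x800000
        }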
  8. src/runtime/sys_windows_arm.s

    // using the initial stack allocated by the OS.
    // It calls back into standard C using the BL below.
    // To do that, the stack pointer must be 8-byte-aligned.
    TEXT runtime·_initcgo(SB),NOSPLIT|NOFRAME,$0
    	MOVM.DB.W [R4, R14], (R13)	// push {r4, lr}
    
    	// Ensure stack is 8-byte aligned before calling C code
    	MOVW	R13, R4
    	BIC	$0x7, R13
    
    	// Allocate a TLS slot to hold g across calls to external code
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Sep 21 15:56:43 UTC 2023
    - 7.7K bytes
    - Viewed (0)
  9. src/syscall/route_bsd.go

    func rsaAlignOf(salen int) int {
    	salign := sizeofPtr
    	if darwin64Bit {
    		// Darwin kernels require 32-bit aligned access to
    		// routing facilities.
    		salign = 4
    	} else if netbsd32Bit {
    		// NetBSD 6 and beyond kernels require 64-bit aligned
    		// access to routing facilities.
    		salign = 8
    	} else if runtime.GOOS == "freebsd" {
    		// In the case of kern.supported_archs="amd64 i386",
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Feb 26 21:03:59 UTC 2024
    - 9.1K bytes
    - Viewed (0)
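
    rsaAlignOf rounds a sockaddr length up to the boundary the kernel
    expects, choosing salign per platform as shown. The rounding
    itself is the standard power-of-two mask trick; a small sketch,
    with alignOf as an illustrative name rather than anything in the
    syscall package:

        package main

        import "fmt"

        // alignOf rounds length up to the next multiple of align,
        // which must be a power of two: add align-1, then clear the
        // low bits.
        func alignOf(length, align int) int {
            return (length + align - 1) &^ (align - 1)
        }

        func main() {
            fmt.Println(alignOf(17, 4)) // 20: 4-byte case (Darwin)
            fmt.Println(alignOf(17, 8)) // 24: 8-byte case (NetBSD)
        }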
  10. src/runtime/traceregion.go

    	next *traceRegionAllocBlock
    	off  atomic.Uintptr
    }
    
    const traceRegionAllocBlockData = 64<<10 - unsafe.Sizeof(traceRegionAllocBlockHeader{})
    
    // alloc allocates an n-byte block. The block is always aligned to 8 bytes, regardless of platform.
    func (a *traceRegionAlloc) alloc(n uintptr) *notInHeap {
    	n = alignUp(n, 8)
    	if n > traceRegionAllocBlockData {
    		throw("traceRegion: alloc too large")
    	}
    	if a.dropping.Load() {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:47:01 UTC 2024
    - 3.2K bytes
    - Viewed (0)
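
    alloc rounds each request up to 8 bytes and claims space by
    atomically bumping an offset within a fixed-size block. A minimal
    sketch of that lock-free bump-allocator pattern (the real
    allocator chains in a fresh block instead of failing when one
    fills up):

        package main

        import (
            "fmt"
            "sync/atomic"
        )

        const blockData = 64 << 10 // 64 KiB of usable space, as above

        // bump sketches the traceRegionAlloc pattern: an atomic
        // offset is advanced to claim space, so no locks are needed.
        type bump struct {
            off atomic.Uintptr
        }

        func (b *bump) alloc(n uintptr) (start uintptr, ok bool) {
            n = (n + 7) &^ 7 // alignUp(n, 8): keep blocks 8-byte aligned
            end := b.off.Add(n)
            if end > blockData {
                return 0, false // exhausted; the runtime grabs a new block
            }
            return end - n, true
        }

        func main() {
            var b bump
            off, _ := b.alloc(5)
            fmt.Println(off) // 0
            off, _ = b.alloc(3)
            fmt.Println(off) // 8: the 5-byte request was rounded up
        }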