Results 1 - 10 of 175 for aligned (0.49 sec)

  1. src/runtime/mpallocbits.go

    }
    
    // pages64 returns a 64-bit bitmap representing a block of 64 pages aligned
    // to 64 pages. The returned block of pages is the one containing the i'th
    // page in this pallocBits. Each bit represents whether the page is in-use.
    func (b *pallocBits) pages64(i uint) uint64 {
    	return (*pageBits)(b).block64(i)
    }
    
    // allocPages64 allocates a 64-bit block of 64 pages aligned to 64 pages according
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sat May 18 15:13:43 UTC 2024
    - 12.5K bytes
    - Viewed (0)
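    The pages64/block64 pattern in this excerpt reduces to word indexing into an
    array of uint64 words, one word per 64 pages. A minimal, self-contained sketch
    of that idea (hypothetical array size, not the runtime's pallocBits layout):

    package main

    import "fmt"

    // pageBits holds one bit per page, 64 pages per uint64 word
    // (512 pages here, purely for illustration).
    type pageBits [8]uint64

    // block64 returns the 64-page-aligned block containing page i, i.e. word i/64.
    func (b *pageBits) block64(i uint) uint64 {
    	return b[i/64]
    }

    func main() {
    	var b pageBits
    	b[2] |= 1 << 5 // mark page 133 (2*64 + 5) as in-use
    	fmt.Printf("%#x\n", b.block64(133)) // 0x20
    }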
  2. internal/ioutil/ioutil.go

    // directio.AlignSize is defined as 0 on macOS, causing a divide-by-zero error.
    const DirectioAlignSize = 4096
    
    // CopyAligned - copies from reader to writer using the aligned input
    // buffer; the input buffer is expected to be aligned to 4K page
    // boundaries. Passing an unaligned buffer may cause this function
    // to return an error.
    //
    // This code is similar in spirit to io.Copy but it is only to be
    Registered: Sun Jun 16 00:44:34 UTC 2024
    - Last Modified: Wed May 22 23:07:14 UTC 2024
    - 10.2K bytes
    - Viewed (0)
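    CopyAligned, as the excerpt notes, misbehaves if the I/O buffer does not itself
    start on a 4K boundary. A common way to obtain such a buffer in pure Go is to
    over-allocate and re-slice; the helper below is a hedged illustration of that
    technique, not minio's implementation:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    const alignSize = 4096 // mirrors the DirectioAlignSize constant above

    // alignedBlock over-allocates by one alignment unit and slices the result
    // so that the returned buffer starts on a 4 KiB boundary.
    func alignedBlock(size int) []byte {
    	buf := make([]byte, size+alignSize)
    	off := int(uintptr(unsafe.Pointer(&buf[0])) & (alignSize - 1))
    	if off != 0 {
    		off = alignSize - off
    	}
    	return buf[off : off+size]
    }

    func main() {
    	b := alignedBlock(64 * 1024)
    	fmt.Println(uintptr(unsafe.Pointer(&b[0]))%alignSize == 0) // true
    }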
  3. pkg/kubelet/cm/devicemanager/topology_hints.go

    	m.mutex.Lock()
    	defer m.mutex.Unlock()
    	for resource, requested := range accumulatedResourceRequests {
    		// Only consider devices that actually contain topology information.
    		if aligned := m.deviceHasTopologyAlignment(resource); !aligned {
    			klog.InfoS("Resource does not have a topology preference", "resource", resource)
    			deviceHints[resource] = nil
    			continue
    		}
    
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Sat Jan 27 02:10:25 UTC 2024
    - 9.9K bytes
    - Viewed (0)
  4. src/runtime/malloc_test.go

    	// nothing else allocates from it.
    	runtime.Acquirem()
    
    	// Make 1-byte allocations until we get a fresh tiny slot.
    	aligned := false
    	for i := 0; i < 16; i++ {
    		x := runtime.Escape(new(byte))
    		if uintptr(unsafe.Pointer(x))&0xf == 0xf {
    			aligned = true
    			break
    		}
    	}
    	if !aligned {
    		runtime.Releasem()
    		t.Fatal("unable to get a fresh tiny slot")
    	}
    
    	// Create a 4-byte object so that the current
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Sep 05 23:35:29 UTC 2023
    - 10.6K bytes
    - Viewed (0)
  5. pkg/kubelet/cm/cpumanager/policy_static.go

    	alignedCPUs := cpuset.New()
    	numaBits := numaAffinity.GetBits()
    
    	// If align-by-socket policy option is enabled, NUMA based hint is expanded to
    	// socket aligned hint. It will ensure that first socket aligned available CPUs are
    	// allocated before we try to find CPUs across socket to satisfy allocation request.
    	if p.options.AlignBySocket {
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Fri Oct 06 13:16:15 UTC 2023
    - 28.8K bytes
    - Viewed (0)
  6. src/internal/runtime/atomic/types.go

    // Lock is a no-op used by -copylocks checker from `go vet`.
    func (*noCopy) Lock()   {}
    func (*noCopy) Unlock() {}
    
    // align64 may be added to structs that must be 64-bit aligned.
    // This struct is recognized by a special case in the compiler
    // and will not work if copied to any other package.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 14.2K bytes
    - Viewed (0)
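    The align64 marker described here is special-cased by the compiler and only
    works inside the runtime's own packages. Outside the runtime, the portable way
    to get the same 64-bit-alignment guarantee is the typed values in sync/atomic
    (Go 1.19+); a short illustration, not taken from types.go:

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // stats mixes a word-sized field with a 64-bit counter. atomic.Uint64 is
    // documented to be 64-bit aligned even inside structs on 32-bit targets,
    // so the Add below is safe everywhere.
    type stats struct {
    	name string
    	hits atomic.Uint64
    }

    func main() {
    	var s stats
    	s.hits.Add(1)
    	fmt.Println(s.hits.Load()) // 1
    }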
  7. pkg/kubelet/cm/devicemanager/manager.go

    		if err != nil {
    			return nil, err
    		}
    		if allocateRemainingFrom(preferred.Intersection(aligned)) {
    			return allocated, nil
    		}
    		// Then fallback to allocate from the aligned set if no preferred list
    		// is returned (or not enough devices are returned in that list).
    		if allocateRemainingFrom(aligned) {
    			return allocated, nil
    		}
    
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Mon Apr 15 12:01:56 UTC 2024
    - 43K bytes
    - Viewed (0)
  8. src/runtime/pinner.go

    func (s *mspan) refreshPinnerBits() {
    	p := s.getPinnerBits()
    	if p == nil {
    		return
    	}
    
    	hasPins := false
    	bytes := alignUp(s.pinnerBitSize(), 8)
    
    	// Iterate over each 8-byte chunk and check for pins. Note that
    	// newPinnerBits guarantees that pinnerBits will be 8-byte aligned, so we
    	// don't have to worry about edge cases, irrelevant bits will simply be
    	// zero.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Apr 04 14:29:45 UTC 2024
    - 11K bytes
    - Viewed (0)
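    alignUp in this excerpt is the usual round-up-to-a-power-of-two helper; the
    8-byte rounding lets the loop described in the comment scan pinnerBits one
    uint64 at a time. A sketch of that idiom with the same call shape as the
    excerpt (the runtime has its own definition; this is just the common bit-trick
    form):

    package main

    import "fmt"

    // alignUp rounds n up to the next multiple of a, where a must be a power
    // of two: adding a-1 and clearing the low bits with &^ does the rounding.
    func alignUp(n, a uintptr) uintptr {
    	return (n + a - 1) &^ (a - 1)
    }

    func main() {
    	fmt.Println(alignUp(13, 8)) // 16
    	fmt.Println(alignUp(16, 8)) // 16
    }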
  9. src/runtime/stubs.go

    //
    // *ptr is uninitialized memory (e.g., memory that's being reused
    // for a new allocation) and hence contains only "junk".
    //
    // memclrNoHeapPointers ensures that if ptr is pointer-aligned, and n
    // is a multiple of the pointer size, then any pointer-aligned,
    // pointer-sized portion is cleared atomically. Despite the function
    // name, this is necessary because this function is the underlying
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 20.2K bytes
    - Viewed (0)
  10. src/runtime/syscall_windows.go

    		// caller reserved spill space.
    		p.dstSpill = alignUp(p.dstSpill, uintptr(t.Align_))
    		p.dstSpill += t.Size_
    	} else {
    		// Register assignment failed.
    		// Undo the work and stack assign.
    		p.parts = oldParts
    
    		// The Go ABI aligns arguments.
    		p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.Align_))
    
    		// Copy just the size of the argument. Note that this
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 20:12:46 UTC 2024
    - 16.6K bytes
    - Viewed (0)