Results 1 - 3 of 3 for chunkIndex (0.38 sec)

  1. src/runtime/mpagecache.go

    	assertLockHeld(p.mheapLock)
    
    	// If the searchAddr refers to a region which has a higher address than
    	// any known chunk, then we know we're out of memory.
    	if chunkIndex(p.searchAddr.addr()) >= p.end {
    		return pageCache{}
    	}
    	c := pageCache{}
    	ci := chunkIndex(p.searchAddr.addr()) // chunk index
    	var chunk *pallocData
    	if p.summary[len(p.summary)-1][ci] != 0 {
    		// Fast path: there are free pages at or near the searchAddr address.
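
    This result is the fast-path check in the page allocator's cache fill:
    chunkIndex maps the search address to an index into per-chunk metadata.
    Below is a minimal, runnable sketch of that address-to-chunk mapping,
    assuming a fixed 4 MiB chunk size; the constants are illustrative
    stand-ins, and the real runtime also offsets addresses by an arena base
    before dividing.

    package main

    import "fmt"

    // Illustrative stand-ins for the runtime's page/chunk configuration
    // (assumptions, not the runtime's actual definitions).
    const (
    	pageSize         = 8192
    	pallocChunkPages = 512
    	pallocChunkBytes = pallocChunkPages * pageSize // 4 MiB per chunk
    )

    // chunkIndex maps an address to the index of the fixed-size chunk
    // containing it: plain integer division by the chunk size.
    func chunkIndex(addr uintptr) uintptr {
    	return addr / pallocChunkBytes
    }

    func main() {
    	base := uintptr(8 * pallocChunkBytes)
    	fmt.Println(chunkIndex(base))                    // 8: chunk holding base
    	fmt.Println(chunkIndex(base + pallocChunkBytes)) // 9: the next chunk over
    }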
  2. src/runtime/mpagealloc_64bit.go

    	// because it comes from the OS, so it's sufficient to align the
    	// index.
    	haveMin := s.min.Load()
    	haveMax := s.max.Load()
    	needMin := alignDown(uintptr(chunkIndex(base)), physPageSize/scSize)
    	needMax := alignUp(uintptr(chunkIndex(limit)), physPageSize/scSize)
    
    	// We need a contiguous range, so extend the range if there's no overlap.
    	if needMax < haveMin {
    		needMax = haveMin
    	}
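
    The alignDown/alignUp calls round the needed chunk-index range outward
    to whole physical pages before comparing it with the range already
    mapped. Below is a minimal sketch of the conventional power-of-two
    alignment helpers and of extending a range to stay contiguous, as in
    the snippet; the helper definitions are the usual ones and are assumed
    here, not quoted from the runtime.

    package main

    import "fmt"

    // Conventional power-of-two alignment helpers (assumed definitions).
    // Masking with a-1 works only when a is a power of two.
    func alignDown(x, a uintptr) uintptr { return x &^ (a - 1) }
    func alignUp(x, a uintptr) uintptr   { return (x + a - 1) &^ (a - 1) }

    func main() {
    	fmt.Println(alignDown(37, 8), alignUp(37, 8)) // 32 40

    	// If the needed range doesn't overlap the range we already have,
    	// grow it until the two touch, so the combined range stays
    	// contiguous (the snippet shows the first of the two directions).
    	haveMin, haveMax := uintptr(64), uintptr(128)
    	needMin, needMax := uintptr(16), uintptr(48)
    	if needMax < haveMin {
    		needMax = haveMin // extend upward to meet the mapped range
    	}
    	if haveMax < needMin {
    		needMin = haveMax // extend downward to meet the mapped range
    	}
    	fmt.Println(needMin, needMax) // 16 64
    }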
  3. tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.cc

      Type new_element_type =
          IntegerType::get(attr.getContext(), storage_bit_width_);
      return attr.mapValues(new_element_type, [&](const APFloat &old) {
        int chunk_index = flatten_index / chunk_size;
        flatten_index++;
        return converters[chunk_index % dim_size].quantizeFloatToInt(old);
      });
    }
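
    Here chunk_index picks a per-axis quantization converter from a flat
    element index: in row-major layout, chunk_size consecutive elements
    share one coordinate along the quantized axis, so dividing by
    chunk_size and reducing modulo the axis length dim_size recovers that
    coordinate. A small Go sketch of the same index arithmetic (constants
    illustrative, names assumed):

    package main

    import "fmt"

    func main() {
    	// chunkSize: consecutive elements sharing one coordinate along
    	// the quantized axis; dimSize: length of that axis. Illustrative.
    	const chunkSize, dimSize = 4, 3
    	for flat := 0; flat < 2*chunkSize*dimSize; flat++ {
    		axis := (flat / chunkSize) % dimSize // == chunk_index % dim_size
    		fmt.Printf("flat=%2d -> converter %d\n", flat, axis)
    	}
    }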
    