- Sort Score
- Results per page: 10
- Languages All
Results 1 - 7 of 7 for chunkIndex (0.52 sec)
-
src/runtime/mpagealloc.go
// Fast path: we're clearing a single bit, and we know exactly // where it is, so mark it directly. i := chunkIndex(base) pi := chunkPageIndex(base) p.chunkOf(i).free1(pi) p.scav.index.free(i, pi, 1) } else { // Slow path: we're clearing more bits so we may need to iterate. sc, ec := chunkIndex(base), chunkIndex(limit) si, ei := chunkPageIndex(base), chunkPageIndex(limit) if sc == ec {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 39.2K bytes - Viewed (0) -
src/runtime/mpagealloc_64bit.go
// because it comes from the OS, so it's sufficient to align the // index. haveMin := s.min.Load() haveMax := s.max.Load() needMin := alignDown(uintptr(chunkIndex(base)), physPageSize/scSize) needMax := alignUp(uintptr(chunkIndex(limit)), physPageSize/scSize) // We need a contiguous range, so extend the range if there's no overlap. if needMax < haveMin { needMax = haveMin }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Jan 03 11:00:10 UTC 2024 - 9.3K bytes - Viewed (0) -
src/runtime/export_test.go
} func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) { ci, off := s.i.find(force) return ChunkIdx(ci), off } func (s *ScavengeIndex) AllocRange(base, limit uintptr) { sc, ec := chunkIndex(base), chunkIndex(limit-1) si, ei := chunkPageIndex(base), chunkPageIndex(limit-1) if sc == ec { // The range doesn't cross any chunk boundaries. s.i.alloc(sc, ei+1-si) } else {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:50:53 UTC 2024 - 46.1K bytes - Viewed (0) -
src/runtime/mgcscavenge.go
// Update minHeapIdx. Note that even if there's no mapping work to do, // we may still have a new, lower minimum heap address. minHeapIdx := s.minHeapIdx.Load() if baseIdx := uintptr(chunkIndex(base)); minHeapIdx == 0 || baseIdx < minHeapIdx { s.minHeapIdx.Store(baseIdx) } return s.sysGrow(base, limit, sysStat) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes - Viewed (0) -
tensorflow/cc/saved_model/fingerprinting_utils_test.cc
std::string expected_pruned_chunked_message_text_proto = R"pb( chunk_index: 0 chunked_fields { field_tag { field: 1 } message { chunk_index: 1 } } chunked_fields { field_tag { field: 7 } field_tag { map_key { boolean: true } } message { chunk_index: 2 } } )pb"; ChunkedMessage expected_pruned_chunked_message;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Feb 20 22:19:55 UTC 2024 - 15.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.cc
Type new_element_type = IntegerType::get(attr.getContext(), storage_bit_width_); return attr.mapValues(new_element_type, [&](const APFloat &old) { int chunk_index = flatten_index / chunk_size; flatten_index++; return converters[chunk_index % dim_size].quantizeFloatToInt(old); }); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 4.3K bytes - Viewed (0) -
tensorflow/cc/saved_model/fingerprinting_utils.cc
std::string chunk, ReadChunk(reader, chunks_info[chunked_message.chunk_index()])); field_checksum = FingerprintCat64(field_checksum, Fingerprint64(chunk)); } else if (matches == field_tags.size()) { // chunked_field_tags are an exact match, but chunked_field is further // broken down into separate chunked_fields (no chunk_index). Hash those // chunked_fields. TF_ASSIGN_OR_RETURN(uint64_t hash,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Feb 20 22:19:55 UTC 2024 - 20.2K bytes - Viewed (0)