- Sort Score
- Result 10 results
- Languages All
Results 51 - 60 of 168 for Malloc (0.11 sec)
-
pkg/registry/core/service/storage/alloc.go
if i > (len(service.Spec.ClusterIPs) - 1) { service.Spec.ClusterIPs = append(service.Spec.ClusterIPs, "" /* just a marker */) } toAlloc[ipFamily] = service.Spec.ClusterIPs[i] } // allocate allocated, err := al.allocIPs(service, toAlloc, dryRun) // set if successful if err == nil { for family, ip := range allocated { for i, check := range service.Spec.IPFamilies {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Oct 31 21:05:05 UTC 2023 - 37.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.cc
// We're cloning operations [remat.begin, remat.end) at position // remat.insert. We store changes to the alloc/dealloc sizes due to the // insertion in a vector `delta`: A change `c_alloc` of `operations_[i].alloc` // as `delta[i] += c_alloc`, and a change `c_dealloc` of // `operations_[i].dealloc` as `delta[i+1] -= c_dealloc`. std::vector<MemSpec> deltas; if (remat.begin == remat.end) { return deltas;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 14 20:57:44 UTC 2023 - 13.7K bytes - Viewed (0) -
src/runtime/mgcsweep.go
s.reportZombies() } } } // Count the number of free objects in this span. nalloc := uint16(s.countAlloc()) nfreed := s.allocCount - nalloc if nalloc > s.allocCount { // The zombie check above should have caught this in // more detail. print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n") throw("sweep increased allocation count") }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:52:18 UTC 2024 - 32.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.h
// to input and output tensors. This vector is // kept sorted + unique. SizeT alloc = 0; // The number of bytes that need to be allocated before // this operation. SizeT dealloc = 0; // The number of bytes that can be deallocated after // this operation. };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 14 20:57:44 UTC 2023 - 12K bytes - Viewed (0) -
src/runtime/mpagealloc.go
// space into chunks. type chunkIdx uint // chunkIndex returns the global index of the palloc chunk containing the // pointer p. func chunkIndex(p uintptr) chunkIdx { return chunkIdx((p - arenaBaseOffset) / pallocChunkBytes) } // chunkBase returns the base address of the palloc chunk at index ci. func chunkBase(ci chunkIdx) uintptr { return uintptr(ci)*pallocChunkBytes + arenaBaseOffset }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 39.2K bytes - Viewed (0) -
src/runtime/pprof/pprof.go
// Pprof will ignore, but useful for people s := memStats fmt.Fprintf(w, "\n# runtime.MemStats\n") fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc) fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc) fmt.Fprintf(w, "# Sys = %d\n", s.Sys) fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups) fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs) fmt.Fprintf(w, "# Frees = %d\n", s.Frees) fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:52:17 UTC 2024 - 30.6K bytes - Viewed (0) -
src/runtime/testdata/testprog/gc.go
// Alternate between whether the chunk will be held live or will be // condemned to GC to create holes in the heap. saved := make([][]byte, allocs/2+1) condemned := make([][]byte, allocs/2) for i := 0; i < allocs; i++ { b := make([]byte, allocChunk) if i%2 == 0 { saved = append(saved, b) } else { condemned = append(condemned, b) } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sun Oct 02 02:28:27 UTC 2022 - 12.1K bytes - Viewed (0) -
pkg/registry/core/service/ipallocator/bitmap.go
return a } return b } // Free returns the count of IP addresses left in the range. func (r *Range) Free() int { return r.alloc.Free() } // Used returns the count of IP addresses used in the range. func (r *Range) Used() int { return r.max - r.alloc.Free() } // CIDR returns the CIDR covered by the range. func (r *Range) CIDR() net.IPNet { return *r.net }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Jan 25 20:32:40 UTC 2023 - 10.8K bytes - Viewed (0) -
src/runtime/mgcscavenge_test.go
"math/rand" . "runtime" "testing" "time" ) // makePallocData produces an initialized PallocData by setting // the ranges described in alloc and scavenge. func makePallocData(alloc, scavenged []BitRange) *PallocData { b := new(PallocData) for _, v := range alloc { if v.N == 0 { // Skip N==0. It's harmless and allocRange doesn't // handle this case. continue } b.AllocRange(v.I, v.N) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 25.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/pick_subgraphs.cc
aggregated_cost_with_decisions; }; // If the output is produced by a callop, will return the callop, otherwise, // will return nullptr. inline func::CallOp GetProducerCallOpOrNull(Value output) { Operation* output_op = output.getDefiningOp(); if (output_op != nullptr && llvm::isa<func::CallOp>(output_op)) { return llvm::cast<func::CallOp>(output_op); } return nullptr; } class PickSubgraphsPass
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 24 15:10:02 UTC 2022 - 19.7K bytes - Viewed (0)