- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 37 for Accounting (0.22 sec)
-
pkg/kubelet/cm/container_manager_linux.go
// cgroup will be the closest ancestor where accounting is performed (most likely /) on systems that launch docker containers. // as a result, on those systems, you will not get cpu or memory accounting statistics for kubelet. // in addition, you would not get memory or cpu accounting for the runtime unless accounting was enabled on its unit (or globally). if systemd, found := cgs["name=systemd"]; found {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue May 21 10:18:16 UTC 2024 - 35.1K bytes - Viewed (0) -
src/runtime/cgocall.go
// foreign code. // // The call to asmcgocall is guaranteed not to // grow the stack and does not allocate memory, // so it is safe to call while "in a system call", outside // the $GOMAXPROCS accounting. // // fn may call back into Go code, in which case we'll exit the // "system call", run the Go code (which may grow the stack), // and then re-enter the "system call" reusing the PC and SP
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:16:47 UTC 2024 - 24.2K bytes - Viewed (0) -
pkg/controller/statefulset/stateful_set.go
ssc.addPod(logger, obj) }, // lookup current and old statefulset if labels changed UpdateFunc: func(oldObj, newObj interface{}) { ssc.updatePod(logger, oldObj, newObj) }, // lookup statefulset accounting for deletion tombstones DeleteFunc: func(obj interface{}) { ssc.deletePod(logger, obj) }, }) ssc.podLister = podInformer.Lister() ssc.podListerSynced = podInformer.Informer().HasSynced
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Jun 05 19:06:41 UTC 2024 - 18.5K bytes - Viewed (0) -
pkg/volume/configmap/configmap.go
return volume.Spec{ // This should be on a tmpfs instead of the local disk; the problem is // charging the memory for the tmpfs to the right cgroup. We should make // this a tmpfs when we can do the accounting correctly. Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}}, } } func (b *configMapVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue May 14 06:17:25 UTC 2024 - 10K bytes - Viewed (0) -
src/runtime/mgclimit.go
// // The reason we do this instead of just waiting for those events to finish and push updates // is to ensure that all the time we're accounting for happened sometime between lastUpdate // and now. This dramatically simplifies reasoning about the limiter because we're not at // risk of extra time being accounted for in this window than actually happened in this window,
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 22 22:07:41 UTC 2024 - 17.3K bytes - Viewed (0) -
src/runtime/arena.go
// Failed to allocate. mp.mallocing = 0 releasem(mp) return nil } if s.needzero != 0 { throw("arena chunk needs zeroing, but should already be zeroed") } // Set up heap bitmap and do extra accounting. if typ.Pointers() { if cap >= 0 { userArenaHeapBitsSetSliceType(typ, cap, ptr, s) } else { userArenaHeapBitsSetType(typ, ptr, s) } c := getMCache(mp) if c == nil {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:44:56 UTC 2024 - 37.9K bytes - Viewed (0) -
src/runtime/metrics_test.go
// // Instead, check against a much more reasonable upper-bound: the amount of // mapped heap memory. We can't possibly overcount to the point of exceeding // total mapped heap memory, except if there's an accounting bug. if live := samples[i].Value.Uint64(); live > mstats.HeapSys { t.Errorf("live bytes: %d > heap sys: %d", live, mstats.HeapSys) } else if live == 0 { // Might happen if we don't call runtime.GC() above.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:52:17 UTC 2024 - 45K bytes - Viewed (0) -
src/internal/trace/gc.go
pi++ continue } out[0][mi].Util -= float64(1) / float64(procs[pi].n) if out[0][mi].Util < 0 { out[0][mi].Util = 0 } mi++ } } // After accounting for the portion we missed, this just acts like the // beginning of a new range. fallthrough case EventRangeBegin: r := ev.Range() if handleSTW(r) { stw++ } else if handleSweep(r) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 26K bytes - Viewed (0) -
pkg/kubelet/server/server.go
namespace = v } if v, ok := c.Spec.Labels[kubelettypes.KubernetesContainerNameLabel]; ok { containerName = v } // Associate pod cgroup with pod so we have an accurate accounting of sandbox if podName == "" && namespace == "" { if pod, found := s.GetPodByCgroupfs(c.Name); found { podName = pod.Name namespace = pod.Namespace } } set := map[string]string{
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Jun 04 06:25:43 UTC 2024 - 40.1K bytes - Viewed (0) -
src/cmd/cover/cover.go
} return s.End() } // endsBasicSourceBlock reports whether s changes the flow of control: break, if, etc., // or if it's just problematic, for instance contains a function literal, which will complicate // accounting due to the block-within-an expression. func (f *File) endsBasicSourceBlock(s ast.Stmt) bool { switch s := s.(type) { case *ast.BlockStmt: // Treat blocks like basic blocks to avoid overlapping counters.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 14 19:41:17 UTC 2024 - 34.5K bytes - Viewed (0)