Results 21 - 30 of 37 for memstats (0.14 sec)

  1. src/cmd/link/internal/ld/main.go

    		if err != nil {
    			log.Fatalf("%v", err)
    		}
    		AtExit(func() {
    			// Profile all outstanding allocations.
    			runtime.GC()
    			// compilebench parses the memory profile to extract memstats,
    			// which are only written in the legacy pprof format.
    			// See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap.
    			const writeLegacyFormat = 1
    - Last Modified: Fri May 17 16:59:50 UTC 2024
    - 16.6K bytes
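    The comment above references the legacy text format selected by writeLegacyFormat = 1. From ordinary user code the same format is reachable through the public runtime/pprof API; a minimal sketch, assuming only documented calls (the output file name is arbitrary):

        package main

        import (
        	"log"
        	"os"
        	"runtime"
        	"runtime/pprof"
        )

        func main() {
        	f, err := os.Create("heap.prof")
        	if err != nil {
        		log.Fatalf("%v", err)
        	}
        	defer f.Close()

        	// Profile all outstanding allocations, as the linker's AtExit hook does.
        	runtime.GC()

        	// debug=1 selects the legacy text format, which appends the
        	// runtime.MemStats fields that compilebench parses.
        	if err := pprof.Lookup("heap").WriteTo(f, 1); err != nil {
        		log.Fatalf("%v", err)
        	}
        }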
  2. src/runtime/mgcscavenge.go

    				nbytes := int64(npages * pageSize)
    				gcController.heapReleased.add(nbytes)
    				gcController.heapFree.add(-nbytes)
    
    				stats := memstats.heapStats.acquire()
    				atomic.Xaddint64(&stats.committed, -nbytes)
    				atomic.Xaddint64(&stats.released, nbytes)
    				memstats.heapStats.release()
    			}
    
    			// Relock the heap, because now we need to make these pages
    - Last Modified: Wed May 08 17:48:45 UTC 2024
    - 52.3K bytes
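    heapReleased and heapFree are internal counters, but their values are exported through the runtime/metrics package. A minimal sketch of observing them from user code, assuming these metric names exist on the toolchain in use (metrics.All lists the supported set):

        package main

        import (
        	"fmt"
        	"runtime/metrics"
        )

        func main() {
        	// These names mirror the internal heapReleased/heapFree counters
        	// that the scavenger adjusts in mgcscavenge.go.
        	samples := []metrics.Sample{
        		{Name: "/memory/classes/heap/released:bytes"},
        		{Name: "/memory/classes/heap/free:bytes"},
        	}
        	metrics.Read(samples)
        	for _, s := range samples {
        		fmt.Printf("%s = %d\n", s.Name, s.Value.Uint64())
        	}
        }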
  3. src/runtime/stack.go

    	}
    	if stackDebug >= 1 {
    		print("stackalloc ", n, "\n")
    	}
    
    	if debug.efence != 0 || stackFromSystem != 0 {
    		n = uint32(alignUp(uintptr(n), physPageSize))
    		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
    		if v == nil {
    			throw("out of memory (stackalloc)")
    		}
    		return stack{uintptr(v), uintptr(v) + uintptr(n)}
    	}
    
    	// Small stacks are allocated with a fixed-size free-list allocator.
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 41.1K bytes
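    The branch shown runs when GODEBUG=efence=1 or stackFromSystem is set, taking stack memory straight from the OS and charging it to memstats.stacks_sys. That counter surfaces as MemStats.StackSys; a minimal sketch of reading it:

        package main

        import (
        	"fmt"
        	"runtime"
        )

        func main() {
        	var ms runtime.MemStats
        	runtime.ReadMemStats(&ms)
        	// StackSys is the exported view of memstats.stacks_sys:
        	// stack memory obtained from the OS, in bytes.
        	fmt.Printf("StackInuse = %d, StackSys = %d\n", ms.StackInuse, ms.StackSys)
        }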
  4. src/time/tick_test.go

    	})
    }
    
    func TestTimerGC(t *testing.T) {
    	run := func(t *testing.T, what string, f func()) {
    		t.Helper()
    		t.Run(what, func(t *testing.T) {
    			t.Helper()
    			const N = 1e4
    			var stats runtime.MemStats
    			runtime.GC()
    			runtime.GC()
    			runtime.GC()
    			runtime.ReadMemStats(&stats)
    			before := int64(stats.Mallocs - stats.Frees)
    
    			for j := 0; j < N; j++ {
    				f()
    			}
    
    			runtime.GC()
    - Last Modified: Tue Jun 11 17:10:37 UTC 2024
    - 14.7K bytes
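    The before/after Mallocs-Frees comparison in TestTimerGC is a reusable leak-check pattern. A standalone sketch of the same idea; the repeated runtime.GC calls let finalizers and sweeping settle, and work here is a placeholder for the workload under test:

        package main

        import (
        	"fmt"
        	"runtime"
        )

        // liveObjects reports the current number of live heap objects.
        func liveObjects() int64 {
        	var stats runtime.MemStats
        	runtime.GC()
        	runtime.GC()
        	runtime.GC()
        	runtime.ReadMemStats(&stats)
        	return int64(stats.Mallocs - stats.Frees)
        }

        func work() {
        	s := make([]*int, 0, 10000)
        	for i := 0; i < 10000; i++ {
        		s = append(s, new(int))
        	}
        	_ = s
        }

        func main() {
        	before := liveObjects()
        	work()
        	after := liveObjects()
        	fmt.Printf("net live objects after workload: %d\n", after-before)
        }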
  5. src/runtime/mfinal.go

    		throw("queuefinalizer during GC")
    	}
    
    	lock(&finlock)
    	if finq == nil || finq.cnt == uint32(len(finq.fin)) {
    		if finc == nil {
    			finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gcMiscSys))
    			finc.alllink = allfin
    			allfin = finc
    			if finptrmask[0] == 0 {
    				// Build pointer mask for Finalizer array in block.
    				// Check assumptions made in finalizer1 array above.
    - Last Modified: Fri Jun 07 01:56:56 UTC 2024
    - 19K bytes
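    finq is fed by the public runtime.SetFinalizer API. A minimal sketch of the user-facing side, with resource as a hypothetical type (finalizers run on a dedicated goroutine, so the sleep only gives it a chance to fire before exit):

        package main

        import (
        	"fmt"
        	"runtime"
        	"time"
        )

        type resource struct{ id int }

        func main() {
        	r := &resource{id: 1}
        	// Queued via queuefinalizer (shown above) once r becomes unreachable.
        	runtime.SetFinalizer(r, func(r *resource) {
        		fmt.Println("finalizing resource", r.id)
        	})
        	r = nil
        	runtime.GC()
        	time.Sleep(100 * time.Millisecond)
        }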
  6. src/runtime/netpoll.go

    		n := pollBlockSize / pdSize
    		if n == 0 {
    			n = 1
    		}
    		// Must be in non-GC memory because can be referenced
    		// only from epoll/kqueue internals.
    		mem := persistentalloc(n*pdSize, 0, &memstats.other_sys)
    		for i := uintptr(0); i < n; i++ {
    			pd := (*pollDesc)(add(mem, i*pdSize))
    			lockInit(&pd.lock, lockRankPollDesc)
    			pd.rt.init(nil, nil)
    			pd.wt.init(nil, nil)
    			pd.link = c.first
    - Last Modified: Wed May 15 19:57:43 UTC 2024
    - 20.7K bytes
  7. src/runtime/iface.go

    		unlock(&itabLock)
    		goto finish
    	}
    
    	// Entry doesn't exist yet. Make a new entry & add it.
    	m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.Methods)-1)*goarch.PtrSize, 0, &memstats.other_sys))
    	m.Inter = inter
    	m.Type = typ
    	// The hash is used in type switches. However, compiler statically generates itab's
    	// for all interface/type pairs used in switches (which are added to itabTable
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 22.5K bytes
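    The itab built here backs dynamic interface conversions; as the comment notes, pairs that are statically visible in type switches get compiler-generated entries, so this runtime path covers conversions the compiler could not see. A minimal sketch of code that performs such a lookup:

        package main

        import "fmt"

        type stringer interface{ String() string }

        type point struct{ x, y int }

        func (p point) String() string { return fmt.Sprintf("(%d,%d)", p.x, p.y) }

        func main() {
        	var v any = point{1, 2}
        	// The assertion consults the itab table (iface.go), adding an
        	// entry on first use if one was not generated at compile time.
        	if s, ok := v.(stringer); ok {
        		fmt.Println(s.String())
        	}
        }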
  8. src/runtime/mprof.go

    	case memProfile:
    		size += unsafe.Sizeof(memRecord{})
    	case blockProfile, mutexProfile:
    		size += unsafe.Sizeof(blockRecord{})
    	}
    
    	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
    	b.typ = typ
    	b.nstk = uintptr(nstk)
    	return b
    }
    
    // stk returns the slice in b holding the stack. The caller can assume that the
    // backing array is immutable.
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 53.3K bytes
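    The memProfile, blockProfile, and mutexProfile buckets allocated here fill up only when the corresponding profilers are enabled. A minimal sketch of switching them on and dumping all three profiles, using only documented runtime and runtime/pprof calls:

        package main

        import (
        	"log"
        	"os"
        	"runtime"
        	"runtime/pprof"
        )

        func main() {
        	// Record every blocking event and every mutex contention event;
        	// both feed the blockRecord buckets allocated in mprof.go.
        	runtime.SetBlockProfileRate(1)
        	runtime.SetMutexProfileFraction(1)

        	// ... workload ...

        	for _, name := range []string{"heap", "block", "mutex"} {
        		f, err := os.Create(name + ".prof")
        		if err != nil {
        			log.Fatal(err)
        		}
        		if err := pprof.Lookup(name).WriteTo(f, 0); err != nil {
        			log.Fatal(err)
        		}
        		f.Close()
        	}
        }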
  9. src/runtime/trace.go

    				throw("trace: reading after shutdown")
    			}
    			// Free all the empty buffers.
    			for trace.empty != nil {
    				buf := trace.empty
    				trace.empty = buf.link
    				sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf), &memstats.other_sys)
    			}
    			// Clear trace.shutdown and other flags.
    			trace.headerWritten = false
    			trace.shutdown.Store(false)
    		}
    		unlock(&trace.lock)
    	})
    
    	if stopTrace {
    - Last Modified: Wed May 22 21:17:41 UTC 2024
    - 37.1K bytes
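    The buffers freed in this cleanup path belong to the execution tracer, which is driven from user code through the runtime/trace package. A minimal start/stop sketch (trace.Stop is what ultimately reaches the shutdown path above):

        package main

        import (
        	"log"
        	"os"
        	"runtime/trace"
        )

        func main() {
        	f, err := os.Create("trace.out")
        	if err != nil {
        		log.Fatal(err)
        	}
        	defer f.Close()

        	if err := trace.Start(f); err != nil {
        		log.Fatal(err)
        	}
        	// ... workload ...
        	trace.Stop() // flushes and frees the trace buffers
        }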
  10. tensorflow/compiler/jit/xla_device_context.cc

    std::optional<AllocatorStats> XlaDeviceAllocator::GetStats() {
      std::optional<stream_executor::AllocatorStats> se_stats =
          stream_executor_->GetAllocatorStats();
      if (!se_stats) {
        return std::nullopt;
      }
    
      tensorflow::AllocatorStats tf_stats;
      tf_stats.num_allocs = se_stats->num_allocs;
      tf_stats.bytes_in_use = se_stats->bytes_in_use;
      tf_stats.peak_bytes_in_use = se_stats->peak_bytes_in_use;
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 12.7K bytes