Results 61 - 70 of 100 for memstats (0.17 sec)

  1. src/math/big/nat_test.go

    		if prod != r.prod {
    			t.Errorf("#%d: got %s; want %s", i, prod, r.prod)
    		}
    	}
    }
    
    // allocBytes returns the number of bytes allocated by invoking f.
    func allocBytes(f func()) uint64 {
    	var stats runtime.MemStats
    	runtime.ReadMemStats(&stats)
    	t := stats.TotalAlloc
    	f()
    	runtime.ReadMemStats(&stats)
    	return stats.TotalAlloc - t
    }
    
    // TestMulUnbalanced tests that multiplying numbers of different lengths
    - Last Modified: Tue Jan 09 15:29:36 UTC 2024
    - 26.2K bytes
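
The allocBytes helper in this result is self-contained and easy to reuse. Below is a minimal sketch of driving it from a test; the package name, sink variable, and test function are hypothetical, not part of nat_test.go.

    package big_test // hypothetical package for this sketch

    import (
    	"runtime"
    	"testing"
    )

    // sink defeats the optimizer so the allocation below reaches the heap.
    var sink []byte

    // allocBytes is the helper from the snippet above, reproduced verbatim.
    func allocBytes(f func()) uint64 {
    	var stats runtime.MemStats
    	runtime.ReadMemStats(&stats)
    	t := stats.TotalAlloc
    	f()
    	runtime.ReadMemStats(&stats)
    	return stats.TotalAlloc - t
    }

    // TestAllocBytes is a hypothetical test showing one way to use the helper.
    func TestAllocBytes(t *testing.T) {
    	got := allocBytes(func() {
    		sink = make([]byte, 1<<20) // 1 MiB heap allocation being measured
    	})
    	if got < 1<<20 {
    		t.Errorf("allocated %d bytes; want at least %d", got, 1<<20)
    	}
    }

Note that TotalAlloc is cumulative across all goroutines, so any concurrent allocation in the test binary inflates the result.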
  2. src/runtime/mfinal.go

    		throw("queuefinalizer during GC")
    	}
    
    	lock(&finlock)
    	if finq == nil || finq.cnt == uint32(len(finq.fin)) {
    		if finc == nil {
    			finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gcMiscSys))
    			finc.alllink = allfin
    			allfin = finc
    			if finptrmask[0] == 0 {
    				// Build pointer mask for Finalizer array in block.
    				// Check assumptions made in finalizer1 array above.
    - Last Modified: Fri Jun 07 01:56:56 UTC 2024
    - 19K bytes
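
This snippet is the runtime-internal side of finalization; the user-facing API that feeds queuefinalizer is runtime.SetFinalizer. A minimal sketch, with a made-up resource type and the usual caveat that finalizers run asynchronously:

    package main

    import (
    	"fmt"
    	"runtime"
    	"time"
    )

    type resource struct{ fd int } // hypothetical type for illustration

    func main() {
    	r := &resource{fd: 3}
    	// SetFinalizer hands the function to the queuefinalizer machinery
    	// above: once r is unreachable, a GC cycle queues the finalizer and
    	// a dedicated goroutine eventually runs it.
    	runtime.SetFinalizer(r, func(r *resource) {
    		fmt.Println("finalizing fd", r.fd)
    	})
    	r = nil
    	runtime.GC()
    	// Not guaranteed to run before exit; the sleep only makes the print likely.
    	time.Sleep(100 * time.Millisecond)
    }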
  3. src/runtime/netpoll.go

    		n := pollBlockSize / pdSize
    		if n == 0 {
    			n = 1
    		}
    		// Must be in non-GC memory because it can be referenced
    		// only from epoll/kqueue internals.
    		mem := persistentalloc(n*pdSize, 0, &memstats.other_sys)
    		for i := uintptr(0); i < n; i++ {
    			pd := (*pollDesc)(add(mem, i*pdSize))
    			lockInit(&pd.lock, lockRankPollDesc)
    			pd.rt.init(nil, nil)
    			pd.wt.init(nil, nil)
    			pd.link = c.first
    - Last Modified: Wed May 15 19:57:43 UTC 2024
    - 20.7K bytes
  4. src/runtime/iface.go

    		unlock(&itabLock)
    		goto finish
    	}
    
    	// Entry doesn't exist yet. Make a new entry & add it.
    	m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.Methods)-1)*goarch.PtrSize, 0, &memstats.other_sys))
    	m.Inter = inter
    	m.Type = typ
    	// The hash is used in type switches. However, the compiler statically generates itab's
    	// for all interface/type pairs used in switches (which are added to itabTable
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 22.5K bytes
  5. src/testing/sub_test.go

    				}
    			}
    			b.Run("", func(b *B) {
    				alloc(b)
    				b.ReportAllocs()
    			})
    			b.Run("", func(b *B) {
    				alloc(b)
    				b.ReportAllocs()
    			})
    			// runtime.MemStats sometimes reports more allocations than the
    			// benchmark is responsible for. Luckily the point of this test is
    			// to ensure that the results are not underreported, so we can
    			// simply verify the lower bound.
    - Last Modified: Fri Dec 01 21:27:08 UTC 2023
    - 23.8K bytes
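
The comment in this snippet is the practical caveat for reading allocs/op figures. The same ReportAllocs pattern in an ordinary benchmark might look like the sketch below; the package and names are hypothetical.

    package demo // hypothetical; benchmarks belong in a _test.go file

    import "testing"

    // sink keeps the allocation in the loop from being optimized away.
    var sink []int

    // BenchmarkAlloc mirrors the sub_test.go pattern: ReportAllocs enables
    // allocs/op output, and since runtime.MemStats can attribute stray
    // allocations to the benchmark, treat the number as a lower bound.
    func BenchmarkAlloc(b *testing.B) {
    	b.ReportAllocs()
    	for i := 0; i < b.N; i++ {
    		sink = make([]int, 8) // one heap allocation per iteration
    	}
    }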
  6. src/runtime/mprof.go

    	case memProfile:
    		size += unsafe.Sizeof(memRecord{})
    	case blockProfile, mutexProfile:
    		size += unsafe.Sizeof(blockRecord{})
    	}
    
    	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
    	b.typ = typ
    	b.nstk = uintptr(nstk)
    	return b
    }
    
    	// stk returns the slice in b holding the stack. The caller can assume that the
    // backing array is immutable.
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 53.3K bytes
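
The buckets allocated here back the user-visible block and mutex profiles. A minimal sketch of enabling those profiles from user code and dumping one of them (the output file name is arbitrary):

    package main

    import (
    	"os"
    	"runtime"
    	"runtime/pprof"
    )

    func main() {
    	// Enabling these is what drives the runtime to allocate the
    	// blockProfile/mutexProfile buckets seen in mprof.go above.
    	runtime.SetBlockProfileRate(1)     // record every blocking event
    	runtime.SetMutexProfileFraction(1) // record every contention event

    	// ... run the workload to be profiled ...

    	f, err := os.Create("block.pprof")
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()
    	if err := pprof.Lookup("block").WriteTo(f, 0); err != nil {
    		panic(err)
    	}
    }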
  7. src/runtime/os_linux.go

    		}
    		throw("newosproc")
    	}
    }
    
    // Version of newosproc that doesn't require a valid G.
    //
    //go:nosplit
    func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
    	stack := sysAlloc(stacksize, &memstats.stacks_sys)
    	if stack == nil {
    		writeErrStr(failallocatestack)
    		exit(1)
    	}
    	ret := clone(cloneFlags, unsafe.Pointer(uintptr(stack)+stacksize), nil, nil, fn)
    	if ret < 0 {
    		writeErrStr(failthreadcreate)
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 25.9K bytes
  8. src/runtime/trace.go

    				throw("trace: reading after shutdown")
    			}
    			// Free all the empty buffers.
    			for trace.empty != nil {
    				buf := trace.empty
    				trace.empty = buf.link
    				sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf), &memstats.other_sys)
    			}
    			// Clear trace.shutdown and other flags.
    			trace.headerWritten = false
    			trace.shutdown.Store(false)
    		}
    		unlock(&trace.lock)
    	})
    
    	if stopTrace {
    - Last Modified: Wed May 22 21:17:41 UTC 2024
    - 37.1K bytes
  9. api/go1.5.txt

    pkg reflect, func ArrayOf(int, Type) Type
    pkg reflect, func FuncOf([]Type, []Type, bool) Type
    pkg runtime, func ReadTrace() []uint8
    pkg runtime, func StartTrace() error
    pkg runtime, func StopTrace()
    pkg runtime, type MemStats struct, GCCPUFraction float64
    pkg runtime/trace, func Start(io.Writer) error
    pkg runtime/trace, func Stop()
    pkg strings, func Compare(string, string) int
    pkg strings, func LastIndexByte(string, uint8) int
    - Last Modified: Thu Jul 30 21:14:09 UTC 2015
    - 46.6K bytes
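
The runtime/trace entries in this go1.5 list are the public face of the buffer management shown in the trace.go result above. A minimal sketch (the output file name is arbitrary):

    package main

    import (
    	"os"
    	"runtime/trace"
    )

    func main() {
    	f, err := os.Create("trace.out")
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()
    	// Start begins collecting events into the runtime's trace buffers;
    	// Stop flushes them and is the path that eventually frees empty
    	// buffers via sysFree, as in the trace.go snippet.
    	if err := trace.Start(f); err != nil {
    		panic(err)
    	}
    	defer trace.Stop()

    	// ... workload to be traced ...
    }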
  10. api/go1.txt

    pkg runtime, type MemStats struct, MSpanSys uint64
    pkg runtime, type MemStats struct, Mallocs uint64
    pkg runtime, type MemStats struct, NextGC uint64
    pkg runtime, type MemStats struct, NumGC uint32
    pkg runtime, type MemStats struct, PauseNs [256]uint64
    pkg runtime, type MemStats struct, PauseTotalNs uint64
    pkg runtime, type MemStats struct, StackInuse uint64
    - Last Modified: Wed Aug 14 18:58:28 UTC 2013
    - 1.7M bytes
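
The go1 list above names several exported MemStats fields; reading them takes one ReadMemStats call. A short sketch:

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	var m runtime.MemStats
    	runtime.ReadMemStats(&m) // stops the world briefly for a consistent snapshot
    	// The fields printed here are the ones listed in the api/go1.txt
    	// and api/go1.5.txt entries above.
    	fmt.Println("Mallocs:      ", m.Mallocs)
    	fmt.Println("NumGC:        ", m.NumGC)
    	fmt.Println("PauseTotalNs: ", m.PauseTotalNs)
    	fmt.Println("StackInuse:   ", m.StackInuse)
    	fmt.Println("GCCPUFraction:", m.GCCPUFraction)
    }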