Results 1 - 10 of 110 for mspan (0.05 sec)

  1. src/runtime/traceallocfree.go

    }
    
    // SpanAlloc records an event indicating that the span has just been allocated.
    func (tl traceLocker) SpanAlloc(s *mspan) {
    	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSpanAlloc, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
    }
    
    // SpanFree records an event indicating that the span is about to be freed.
    func (tl traceLocker) SpanFree(s *mspan) {

    - Last Modified: Wed May 22 20:32:51 UTC 2024
    - 5.9K bytes
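SpanAlloc and SpanFree are experimental trace events: they are written into the execution trace rather than exposed through any public API, and only when the tracer's allocation/free experiment is enabled. For orientation only, a standard execution trace is captured with the public runtime/trace package; this is a minimal sketch under that assumption, and whether span events appear in the resulting trace depends on the experiments enabled in the runtime build:

    package main

    import (
    	"os"
    	"runtime/trace"
    )

    func main() {
    	f, err := os.Create("trace.out")
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	// Capture an execution trace around some allocation work. Span
    	// alloc/free events are experimental and may be absent from a
    	// default trace.
    	if err := trace.Start(f); err != nil {
    		panic(err)
    	}
    	defer trace.Stop()

    	sink := make([][]byte, 0, 1024)
    	for i := 0; i < 1024; i++ {
    		sink = append(sink, make([]byte, 4096))
    	}
    	_ = sink
    }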
  2. src/runtime/mheap.go

    	if span.list != list {
    		print("runtime: failed mSpanList.remove span.npages=", span.npages,
    			" span=", span, " prev=", span.prev, " span.list=", span.list, " list=", list, "\n")
    		throw("mSpanList.remove")
    	}
    	if list.first == span {
    		list.first = span.next
    	} else {
    		span.prev.next = span.next
    	}
    	if list.last == span {
    		list.last = span.prev
    	} else {
    		span.next.prev = span.prev

    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
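mSpanList is a hand-rolled doubly-linked list, and the snippet is its unlink step with a consistency check on the node's back-pointer. Below is a self-contained sketch of the same pattern with a hypothetical node type standing in for mspan; the names list, node, insert, and remove are illustrative, not the runtime's:

    package main

    import "fmt"

    type node struct {
    	val  int
    	prev *node
    	next *node
    	list *list // back-pointer, checked on remove as in mSpanList
    }

    type list struct {
    	first *node
    	last  *node
    }

    func (l *list) insert(n *node) {
    	n.list = l
    	n.prev = l.last
    	if l.last != nil {
    		l.last.next = n
    	} else {
    		l.first = n
    	}
    	l.last = n
    }

    func (l *list) remove(n *node) {
    	if n.list != l {
    		panic("node not on this list") // mirrors the throw above
    	}
    	if l.first == n {
    		l.first = n.next
    	} else {
    		n.prev.next = n.next
    	}
    	if l.last == n {
    		l.last = n.prev
    	} else {
    		n.next.prev = n.prev
    	}
    	n.prev, n.next, n.list = nil, nil, nil
    }

    func main() {
    	var l list
    	a, b, c := &node{val: 1}, &node{val: 2}, &node{val: 3}
    	l.insert(a)
    	l.insert(b)
    	l.insert(c)
    	l.remove(b)
    	for n := l.first; n != nil; n = n.next {
    		fmt.Println(n.val) // prints 1, then 3
    	}
    }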
  3. src/runtime/mbitmap.go

    	}
    	return tp
    }
    
    // objBase returns the base pointer for the object containing addr in span.
    //
    // Assumes that addr points into a valid part of span (span.base() <= addr < span.limit).
    //
    //go:nosplit
    func (span *mspan) objBase(addr uintptr) uintptr {
    	return span.base() + span.objIndex(addr)*span.elemsize
    }
    
    // bulkBarrierPreWrite executes a write barrier

    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 60K bytes
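objBase rounds an interior pointer down to the start of the object that contains it: a span is an array of equal-sized slots, so the object index is (addr - span.base()) / elemsize, and the base is that index times elemsize past the span base (the runtime's objIndex avoids the division with a precomputed magic-number multiply). A small sketch of the arithmetic with made-up span parameters:

    package main

    import "fmt"

    // objBase mimics the rounding in (*mspan).objBase using plain division;
    // the real runtime computes the object index via a magic multiply.
    func objBase(spanBase, elemsize, addr uintptr) uintptr {
    	objIndex := (addr - spanBase) / elemsize
    	return spanBase + objIndex*elemsize
    }

    func main() {
    	const base, elemsize = 0x1000, 48
    	addr := uintptr(base + 2*elemsize + 17) // interior pointer into object 2
    	fmt.Printf("objBase(%#x) = %#x\n", addr, objBase(base, elemsize, addr))
    	// Output: objBase(0x1071) = 0x1060
    }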
  4. src/runtime/malloc.go

    		span = c.allocLarge(size, noscan)
    		span.freeindex = 1
    		span.allocCount = 1
    		size = span.elemsize
    		x = unsafe.Pointer(span.base())
    		if needzero && span.needzero != 0 {
    			delayedZeroing = true
    		}
    		if !noscan {
    			// Tell the GC not to look at this yet.
    			span.largeType = nil
    			header = &span.largeType
    		}
    	}

    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 59.6K bytes
  5. src/runtime/export_test.go

    	}
    	return false
    }
    
    // mspan wrapper for testing.
    type MSpan mspan
    
    // Allocate an mspan for testing.
    func AllocMSpan() *MSpan {
    	var s *mspan
    	systemstack(func() {
    		lock(&mheap_.lock)
    		s = (*mspan)(mheap_.spanalloc.alloc())
    		unlock(&mheap_.lock)
    	})
    	return (*MSpan)(s)
    }
    
    // Free an allocated mspan.
    func FreeMSpan(s *MSpan) {
    	systemstack(func() {

    - Last Modified: Thu May 30 17:50:53 UTC 2024
    - 46.1K bytes
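export_test.go is the standard idiom for letting external tests reach unexported internals: it lives inside the package under test but is compiled only when testing, so the aliases it declares (MSpan, AllocMSpan, FreeMSpan here) never ship in regular builds. A minimal sketch of the idiom using a hypothetical cache package; the three files are shown in one listing:

    // cache.go
    package cache

    func internalSize() int { return 42 }

    // export_test.go — compiled only while testing package cache.
    package cache

    var InternalSize = internalSize

    // cache_test.go — the external test package reaches the internal
    // function through the test-only re-export.
    package cache_test

    import (
    	"testing"

    	"example.com/cache"
    )

    func TestInternalSize(t *testing.T) {
    	if got := cache.InternalSize(); got != 42 {
    		t.Fatalf("internalSize() = %d, want 42", got)
    	}
    }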
  6. src/runtime/metrics/doc.go

    		Memory that is occupied by runtime mcache structures that are
    		currently being used.
    
    	/memory/classes/metadata/mspan/free:bytes
    		Memory that is reserved for runtime mspan structures, but not
    		in-use.
    
    	/memory/classes/metadata/mspan/inuse:bytes
    		Memory that is occupied by runtime mspan structures that are
    		currently being used.
    
    	/memory/classes/metadata/other:bytes

    - Last Modified: Wed May 22 22:58:43 UTC 2024
    - 20K bytes
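These metric names are the supported way to observe mspan metadata overhead from application code, via the runtime/metrics package. A self-contained example reading the two mspan metrics:

    package main

    import (
    	"fmt"
    	"runtime/metrics"
    )

    func main() {
    	samples := []metrics.Sample{
    		{Name: "/memory/classes/metadata/mspan/inuse:bytes"},
    		{Name: "/memory/classes/metadata/mspan/free:bytes"},
    	}
    	metrics.Read(samples)
    	for _, s := range samples {
    		if s.Value.Kind() != metrics.KindUint64 {
    			fmt.Printf("%s: unexpected kind %v\n", s.Name, s.Value.Kind())
    			continue
    		}
    		fmt.Printf("%s = %d\n", s.Name, s.Value.Uint64())
    	}
    }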
  7. src/runtime/stack.go

    	if s.state.get() != mSpanManual {
    		throw("freeing stack not in a stack span")
    	}
    	if s.manualFreeList.ptr() == nil {
    		// s will now have a free stack
    		stackpool[order].item.span.insert(s)
    	}
    	x.ptr().next = s.manualFreeList
    	s.manualFreeList = x
    	s.allocCount--
    	if gcphase == _GCoff && s.allocCount == 0 {
    		// Span is completely free. Return it to the heap
    		// immediately if we're sweeping.
    		//

    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 41.1K bytes
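The two assignments `x.ptr().next = s.manualFreeList; s.manualFreeList = x` are an intrusive free-list push: the freed stack slot itself stores the link. The same shape in isolation, with an ordinary pointer type standing in for the runtime's gclinkptr:

    package main

    import "fmt"

    // link is an intrusive free-list node embedded in the freed object
    // itself, in the spirit of the runtime's gclink/manualFreeList.
    type link struct{ next *link }

    type freeList struct{ head *link }

    func (f *freeList) push(x *link) {
    	x.next = f.head // x.ptr().next = s.manualFreeList
    	f.head = x      // s.manualFreeList = x
    }

    func (f *freeList) pop() *link {
    	x := f.head
    	if x != nil {
    		f.head = x.next
    	}
    	return x
    }

    func main() {
    	var f freeList
    	f.push(&link{})
    	f.push(&link{})
    	fmt.Println(f.pop() != nil, f.pop() != nil, f.pop() == nil) // true true true
    }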
  8. src/runtime/gc_test.go

    	for i := 0; i < b.N; i++ {
    		b.StartTimer()
    		runtime.GC()
    		runtime.GC()
    		b.StopTimer()
    	}
    	close(teardown)
    }
    
    func BenchmarkMSpanCountAlloc(b *testing.B) {
    	// Allocate one dummy mspan for the whole benchmark.
    	s := runtime.AllocMSpan()
    	defer runtime.FreeMSpan(s)
    
    	// n is the number of bytes to benchmark against.
    	// n must always be a multiple of 8, since gcBits is
    	// always rounded up to 8 bytes.

    - Last Modified: Wed Jun 05 22:33:52 UTC 2024
    - 17.6K bytes
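The benchmark brackets only the two GC calls with b.StartTimer and b.StopTimer, so per-iteration setup stays out of the measurement. The same pattern in a self-contained, hypothetical benchmark (not the runtime's test):

    package bench_test

    import (
    	"runtime"
    	"testing"
    )

    func BenchmarkForcedGC(b *testing.B) {
    	b.StopTimer() // the timer starts running; stop it so setup is untimed
    	for i := 0; i < b.N; i++ {
    		data := make([]byte, 1<<20) // untimed per-iteration setup
    		_ = data

    		b.StartTimer()
    		runtime.GC() // only the collection itself is measured
    		b.StopTimer()
    	}
    }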
  9. src/runtime/metrics_test.go

    		case "/memory/classes/metadata/mcache/inuse:bytes":
    			checkUint64(t, name, samples[i].Value.Uint64(), mstats.MCacheInuse)
    		case "/memory/classes/metadata/mspan/free:bytes":
    			checkUint64(t, name, samples[i].Value.Uint64(), mstats.MSpanSys-mstats.MSpanInuse)
    		case "/memory/classes/metadata/mspan/inuse:bytes":
    			checkUint64(t, name, samples[i].Value.Uint64(), mstats.MSpanInuse)
    		case "/memory/classes/metadata/other:bytes":

    - Last Modified: Thu May 30 17:52:17 UTC 2024
    - 45K bytes
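The test pins down the identity behind the free metric: mspan/free:bytes equals MSpanSys - MSpanInuse from runtime.MemStats. The relationship can be checked from ordinary code, with the caveat that the two reads below are not atomic with respect to each other, so the numbers can drift slightly between calls:

    package main

    import (
    	"fmt"
    	"runtime"
    	"runtime/metrics"
    )

    func main() {
    	samples := []metrics.Sample{
    		{Name: "/memory/classes/metadata/mspan/free:bytes"},
    		{Name: "/memory/classes/metadata/mspan/inuse:bytes"},
    	}
    	metrics.Read(samples)

    	var m runtime.MemStats
    	runtime.ReadMemStats(&m)

    	fmt.Printf("metrics free     = %d\n", samples[0].Value.Uint64())
    	fmt.Printf("MSpanSys - Inuse = %d\n", m.MSpanSys-m.MSpanInuse)
    	fmt.Printf("metrics inuse = %d, MSpanInuse = %d\n",
    		samples[1].Value.Uint64(), m.MSpanInuse)
    }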
  10. src/runtime/runtime2.go

    	sudogbuf   [128]*sudog
    
    	// Cache of mspan objects from the heap.
    	mspancache struct {
    		// We need an explicit length here because this field is used
    		// in allocation codepaths where write barriers are not allowed,
    		// and eliminating the write barrier/keeping it eliminated from
    		// slice updates is tricky, more so than just managing the length
    		// ourselves.
    		len int
    		buf [128]*mspan
    	}
    

    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 47.9K bytes
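mspancache is a fixed array plus an explicit length instead of a slice precisely so that updating it never writes a slice header, keeping write barriers out of the allocation path. A standalone sketch of the same push/pop shape; the spanCache and span names are illustrative, and a plain Go program has no write-barrier constraint, so this is purely structural:

    package main

    import "fmt"

    type span struct{ id int }

    // spanCache mirrors the fixed-capacity, explicit-length shape of
    // the runtime's mspancache.
    type spanCache struct {
    	len int
    	buf [128]*span
    }

    func (c *spanCache) push(s *span) bool {
    	if c.len == len(c.buf) {
    		return false // cache full; the caller falls back to the heap
    	}
    	c.buf[c.len] = s
    	c.len++
    	return true
    }

    func (c *spanCache) pop() *span {
    	if c.len == 0 {
    		return nil // cache empty; the caller allocates from the heap
    	}
    	c.len--
    	s := c.buf[c.len]
    	c.buf[c.len] = nil
    	return s
    }

    func main() {
    	var c spanCache
    	c.push(&span{id: 1})
    	c.push(&span{id: 2})
    	fmt.Println(c.pop().id, c.pop().id) // 2 1 (LIFO)
    }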