Results 1 - 10 of 642 for mspan (0.04 sec)

  1. src/runtime/traceallocfree.go

    }
    
    // SpanAlloc records an event indicating that the span has just been allocated.
    func (tl traceLocker) SpanAlloc(s *mspan) {
    	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSpanAlloc, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
    }
    
    // SpanFree records an event indicating that the span is about to be freed.
    func (tl traceLocker) SpanFree(s *mspan) {
    - Last Modified: Wed May 22 20:32:51 UTC 2024
    - 5.9K bytes
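    Both SpanAlloc and SpanFree funnel through eventWriter(...).commit, which
    encodes one event type plus a series of integer arguments. A toy sketch of
    that shape, assuming only that arguments are varint-encoded; the event ID
    and argument values are made up, and the real tracer adds timestamps,
    batching, and flushing on top:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // eventWriter is a stand-in for the runtime's trace event writer:
    // one event-type byte followed by varint-encoded arguments.
    type eventWriter struct{ buf []byte }

    func (w *eventWriter) commit(ev byte, args ...uint64) {
    	w.buf = append(w.buf, ev)
    	for _, a := range args {
    		w.buf = binary.AppendUvarint(w.buf, a)
    	}
    }

    func main() {
    	const evSpanAlloc = 1 // hypothetical event ID
    	var w eventWriter
    	w.commit(evSpanAlloc, 0x1234, 8, 5) // span ID, npages, type/class
    	fmt.Printf("% x\n", w.buf)
    }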
  2. src/runtime/mheap.go

    	if span.list != list {
    		print("runtime: failed mSpanList.remove span.npages=", span.npages,
    			" span=", span, " prev=", span.prev, " span.list=", span.list, " list=", list, "\n")
    		throw("mSpanList.remove")
    	}
    	if list.first == span {
    		list.first = span.next
    	} else {
    		span.prev.next = span.next
    	}
    	if list.last == span {
    		list.last = span.prev
    	} else {
    		span.next.prev = span.prev
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
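    The snippet is a textbook intrusive doubly-linked-list unlink: patch the
    predecessor (or the list head) and the successor (or the tail). A
    self-contained sketch of the same logic, with a plain node type standing
    in for mspan:

    package main

    import "fmt"

    // list mirrors mSpanList: an intrusive doubly-linked list with
    // explicit first/last pointers and no sentinel nodes.
    type node struct {
    	prev, next *node
    	val        int
    }

    type list struct{ first, last *node }

    func (l *list) push(n *node) {
    	n.prev = l.last
    	if l.last != nil {
    		l.last.next = n
    	} else {
    		l.first = n
    	}
    	l.last = n
    }

    // remove unlinks n, fixing up first/last when n sits at either end,
    // just as mSpanList.remove does for spans.
    func (l *list) remove(n *node) {
    	if l.first == n {
    		l.first = n.next
    	} else {
    		n.prev.next = n.next
    	}
    	if l.last == n {
    		l.last = n.prev
    	} else {
    		n.next.prev = n.prev
    	}
    	n.prev, n.next = nil, nil
    }

    func main() {
    	l := &list{}
    	a, b, c := &node{val: 1}, &node{val: 2}, &node{val: 3}
    	l.push(a)
    	l.push(b)
    	l.push(c)
    	l.remove(b)
    	for n := l.first; n != nil; n = n.next {
    		fmt.Println(n.val) // 1, then 3
    	}
    }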
  3. src/runtime/os_nonopenbsd.go

    //go:build !openbsd
    
    package runtime
    
    // osStackAlloc performs OS-specific initialization before s is used
    // as stack memory.
    func osStackAlloc(s *mspan) {
    }
    
    // osStackFree undoes the effect of osStackAlloc before s is returned
    // to the heap.
    func osStackFree(s *mspan) {
    - Last Modified: Thu Oct 28 18:17:57 UTC 2021
    - 437 bytes
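    The empty bodies are the point: this file supplies no-op stubs under
    //go:build !openbsd, while os_openbsd.go (result 10 below) provides the
    real implementations. The same technique for a hypothetical hook of your
    own; afterAlloc, mypkg, and the madvise call are illustrative, not from
    the runtime:

    // hook_other.go
    //go:build !linux

    package mypkg

    // afterAlloc is a no-op where no OS-specific setup is needed,
    // mirroring the empty osStackAlloc stub above.
    func afterAlloc(b []byte) {}

    // hook_linux.go
    //go:build linux

    package mypkg

    import "golang.org/x/sys/unix"

    // afterAlloc gives the kernel a usage hint on Linux only.
    func afterAlloc(b []byte) {
    	_ = unix.Madvise(b, unix.MADV_WILLNEED)
    }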
  4. src/runtime/mcentral.go

    	return s
    }
    
    // Return span from an mcache.
    //
    // s must have a span class corresponding to this
    // mcentral and it must not be empty.
    func (c *mcentral) uncacheSpan(s *mspan) {
    	if s.allocCount == 0 {
    		throw("uncaching span but s.allocCount == 0")
    	}
    
    	sg := mheap_.sweepgen
    	stale := s.sweepgen == sg+1
    
    	// Fix up sweepgen.
    	if stale {
    		// Span was cached before sweep began. It's our
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 8.1K bytes
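    The stale test compares the span's sweep generation with the heap's: per
    the state comments in mheap.go, sweepgen == sg+1 means the span was
    cached before the current sweep began and still needs sweeping, while
    sg+3 means it was swept and then cached. A reduced model of that check;
    the real code updates sweepgen atomically and actually sweeps the stale
    span:

    package main

    import "fmt"

    // span carries only its sweep generation; in the runtime,
    // mheap_.sweepgen advances by 2 each GC cycle.
    type span struct{ sweepgen uint32 }

    func uncache(s *span, sg uint32) {
    	stale := s.sweepgen == sg+1
    	if stale {
    		// Cached before sweeping began: this caller now owns the
    		// sweep, so mark the span as being swept (sg-1).
    		s.sweepgen = sg - 1
    		fmt.Println("stale: sweep it ourselves")
    	} else {
    		// Cached after being swept: publish it as swept (sg).
    		s.sweepgen = sg
    		fmt.Println("fresh: hand straight back")
    	}
    }

    func main() {
    	sg := uint32(10)
    	uncache(&span{sweepgen: sg + 1}, sg)
    	uncache(&span{sweepgen: sg + 3}, sg)
    }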
  5. src/runtime/mspanset.go

    	h.u.Store(0)
    }
    
    // atomicMSpanPointer is an atomic.Pointer[mspan]. Can't use generics because it's NotInHeap.
    type atomicMSpanPointer struct {
    	p atomic.UnsafePointer
    }
    
    // Load returns the *mspan.
    func (p *atomicMSpanPointer) Load() *mspan {
    	return (*mspan)(p.p.Load())
    }
    
    // Store stores an *mspan.
    func (p *atomicMSpanPointer) StoreNoWB(s *mspan) {
    	p.p.StoreNoWB(unsafe.Pointer(s))
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 13.1K bytes
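    Outside the runtime, where NotInHeap doesn't rule out generics, this
    wrapper is simply atomic.Pointer[T]. Spelled out by hand with sync/atomic
    it looks like the sketch below (thing is a placeholder type; the NoWB
    suffix in the runtime marks a store that skips the write barrier, which
    user code never needs):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    	"unsafe"
    )

    type thing struct{ id int }

    // atomicThingPointer mirrors atomicMSpanPointer: a typed facade over
    // an untyped atomic pointer.
    type atomicThingPointer struct {
    	p unsafe.Pointer // holds a *thing
    }

    func (p *atomicThingPointer) Load() *thing {
    	return (*thing)(atomic.LoadPointer(&p.p))
    }

    func (p *atomicThingPointer) Store(t *thing) {
    	atomic.StorePointer(&p.p, unsafe.Pointer(t))
    }

    func main() {
    	var p atomicThingPointer
    	p.Store(&thing{id: 42})
    	fmt.Println(p.Load().id) // 42
    }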
  6. src/runtime/mgcsweep.go

    		throw("mspan.sweep: m is not locked")
    	}
    
    	s := sl.mspan
    	if !preserve {
    		// We'll release ownership of this span. Nil it out to
    		// prevent the caller from accidentally using it.
    		sl.mspan = nil
    	}
    
    	sweepgen := mheap_.sweepgen
    	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
    - Last Modified: Wed May 08 17:52:18 UTC 2024
    - 32.9K bytes
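    The sl.mspan = nil line is an ownership-transfer idiom: once the sweeper
    releases the span, its own handle is poisoned so any accidental later use
    fails fast instead of racing with the new owner. A minimal sketch of the
    idiom, with illustrative names:

    package main

    import "fmt"

    type resource struct{ name string }

    // locked represents exclusive ownership of a resource, like the
    // sweepLocked wrapper around an mspan.
    type locked struct{ r *resource }

    // release hands the resource back. Unless preserve is set, the
    // handle is nilled so the caller can't keep using what it no
    // longer owns; a later dereference panics immediately.
    func (l *locked) release(preserve bool) *resource {
    	r := l.r
    	if !preserve {
    		l.r = nil
    	}
    	return r
    }

    func main() {
    	l := locked{r: &resource{name: "span"}}
    	r := l.release(false)
    	fmt.Println(r.name, l.r == nil) // span true
    }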
  7. src/runtime/pinner.go

    	span.ensureSwept()
    	KeepAlive(ptr) // make sure ptr is still alive after span is swept
    
    	objIndex := span.objIndex(uintptr(ptr))
    
    	lock(&span.speciallock) // guard against concurrent calls of setPinned on same span
    
    	pinnerBits := span.getPinnerBits()
    	if pinnerBits == nil {
    		pinnerBits = span.newPinnerBits()
    		span.setPinnerBits(pinnerBits)
    	}
    	pinState := pinnerBits.ofObject(objIndex)
    	if pin {
    - Last Modified: Thu Apr 04 14:29:45 UTC 2024
    - 11K bytes
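    getPinnerBits/newPinnerBits/setPinnerBits form a lazy allocation guarded
    by span.speciallock: check under the lock, allocate only if still missing,
    then publish. A simplified sketch with a sync.Mutex and a one-bit-per-object
    bitmap (the runtime's pinner bits are more elaborate):

    package main

    import (
    	"fmt"
    	"sync"
    )

    type span struct {
    	mu   sync.Mutex
    	bits []byte // allocated lazily, one bit per object
    }

    // setPinned flips the pin bit for one object. Holding mu makes the
    // check-then-create atomic, so two concurrent pinners can never
    // install two bitmaps.
    func (s *span) setPinned(objIndex uint, pin bool) {
    	s.mu.Lock()
    	defer s.mu.Unlock()
    	if s.bits == nil {
    		s.bits = make([]byte, 16) // sized for the span's object count
    	}
    	byteIdx, bit := objIndex/8, byte(1)<<(objIndex%8)
    	if pin {
    		s.bits[byteIdx] |= bit
    	} else {
    		s.bits[byteIdx] &^= bit
    	}
    }

    func main() {
    	var s span
    	s.setPinned(3, true)
    	fmt.Printf("%08b\n", s.bits[0]) // 00001000
    }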
  8. src/runtime/arena.go

    		rzSize := computeRZlog(span.elemsize)
    		span.elemsize -= rzSize
    		span.largeType.Size_ = span.elemsize
    		rzStart := span.base() + span.elemsize
    		span.userArenaChunkFree = makeAddrRange(span.base(), rzStart)
    		asanpoison(unsafe.Pointer(rzStart), span.limit-rzStart)
    		asanunpoison(unsafe.Pointer(span.base()), span.elemsize)
    	}
    
    	if rate := MemProfileRate; rate > 0 {
    		c := getMCache(mp)
    		if c == nil {
    - Last Modified: Wed May 08 17:44:56 UTC 2024
    - 37.9K bytes
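    Under ASan the arena chunk gives up the tail of each element as a
    redzone: elemsize shrinks by rzSize, the object keeps
    [base, base+elemsize), and [rzStart, limit) is poisoned so out-of-bounds
    accesses trip the sanitizer. The address arithmetic, with fixed example
    numbers standing in for computeRZlog and the real span geometry:

    package main

    import "fmt"

    func main() {
    	const (
    		base   = 0x1000
    		limit  = 0x3000
    		rzSize = 0x400 // stand-in for computeRZlog(elemsize)
    	)
    	elemsize := (limit - base) - rzSize
    	rzStart := base + elemsize
    	// asanunpoison would cover the object, asanpoison the redzone.
    	fmt.Printf("object:  [%#x, %#x)\n", base, rzStart)
    	fmt.Printf("redzone: [%#x, %#x)\n", rzStart, limit)
    }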
  9. src/runtime/mbitmap.go

    	}
    	return tp
    }
    
    // objBase returns the base pointer for the object containing addr in span.
    //
    // Assumes that addr points into a valid part of span (span.base() <= addr < span.limit).
    //
    //go:nosplit
    func (span *mspan) objBase(addr uintptr) uintptr {
    	return span.base() + span.objIndex(addr)*span.elemsize
    }
    
    // bulkBarrierPreWrite executes a write barrier
    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 60K bytes
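    objBase rounds an interior pointer down to the start of its object: the
    object index is the offset into the span divided by the element size
    (the runtime replaces the division with a precomputed magic multiply
    inside objIndex), and the base is that index scaled back up. The same
    arithmetic with plain integers:

    package main

    import "fmt"

    // objBase mirrors (*mspan).objBase for a span of fixed-size objects.
    func objBase(spanBase, elemsize, addr uintptr) uintptr {
    	objIndex := (addr - spanBase) / elemsize
    	return spanBase + objIndex*elemsize
    }

    func main() {
    	base, elem := uintptr(0x1000), uintptr(0x60)
    	addr := base + 2*elem + 0x10 // interior pointer into object 2
    	fmt.Printf("%#x\n", objBase(base, elem, addr)) // 0x10c0
    }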
  10. src/runtime/os_openbsd.go

    }
    
    //go:nosplit
    func validSIGPROF(mp *m, c *sigctxt) bool {
    	return true
    }
    
    func osStackAlloc(s *mspan) {
    	osStackRemap(s, _MAP_STACK)
    }
    
    func osStackFree(s *mspan) {
    	// Undo MAP_STACK.
    	osStackRemap(s, 0)
    }
    
    func osStackRemap(s *mspan, flags int32) {
    	a, err := mmap(unsafe.Pointer(s.base()), s.npages*pageSize, _PROT_READ|_PROT_WRITE, _MAP_PRIVATE|_MAP_ANON|_MAP_FIXED|flags, -1, 0)
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 6.2K bytes
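    osStackRemap leans on mmap's MAP_FIXED semantics: mapping the same range
    again atomically replaces the old mapping, which lets the runtime add or
    drop MAP_STACK in place with no window where the range is unmapped. A
    Linux/amd64 sketch of the same trick via golang.org/x/sys/unix; the
    runtime uses its own mmap wrapper, MAP_STACK is only a hint on Linux,
    and error handling here is simplified:

    package main

    import (
    	"fmt"
    	"unsafe"

    	"golang.org/x/sys/unix"
    )

    // remap replaces the mapping at addr with a fresh anonymous mapping
    // carrying extra flags, relying on MAP_FIXED's atomic replacement.
    func remap(addr unsafe.Pointer, length uintptr, extra int) error {
    	p, _, errno := unix.Syscall6(unix.SYS_MMAP,
    		uintptr(addr), length,
    		uintptr(unix.PROT_READ|unix.PROT_WRITE),
    		uintptr(unix.MAP_PRIVATE|unix.MAP_ANON|unix.MAP_FIXED|extra),
    		^uintptr(0), // fd = -1 for an anonymous mapping
    		0)
    	if errno != 0 {
    		return errno
    	}
    	if p != uintptr(addr) {
    		return fmt.Errorf("mmap moved the mapping to %#x", p)
    	}
    	return nil
    }

    func main() {
    	b, err := unix.Mmap(-1, 0, 4096,
    		unix.PROT_READ|unix.PROT_WRITE, unix.MAP_PRIVATE|unix.MAP_ANON)
    	if err != nil {
    		panic(err)
    	}
    	// Replace the page in place, adding the MAP_STACK hint.
    	if err := remap(unsafe.Pointer(&b[0]), 4096, unix.MAP_STACK); err != nil {
    		panic(err)
    	}
    	fmt.Println("remapped in place")
    }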