Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 9 of 9 for allocmcache (0.16 sec)

  1. src/runtime/mcache.go

    }
    
    type stackfreelist struct {
    	list gclinkptr // linked list of free stacks
    	size uintptr   // total size of stacks in list
    }
    
    // dummy mspan that contains no free objects.
    var emptymspan mspan
    
    func allocmcache() *mcache {
    	var c *mcache
    	systemstack(func() {
    		lock(&mheap_.lock)
    		c = (*mcache)(mheap_.cachealloc.alloc())
    		c.flushGen.Store(mheap_.sweepgen)
    		unlock(&mheap_.lock)
    	})
    	for i := range c.alloc {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 10K bytes
    - Viewed (0)
  2. src/runtime/malloc.go

    func nextFreeFast(s *mspan) gclinkptr {
    	theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache?
    	if theBit < 64 {
    		result := s.freeindex + uint16(theBit)
    		if result < s.nelems {
    			freeidx := result + 1
    			if freeidx%64 == 0 && freeidx != s.nelems {
    				return 0
    			}
    			s.allocCache >>= uint(theBit + 1)
    			s.freeindex = freeidx
    			s.allocCount++
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 59.6K bytes
    - Viewed (0)
  3. src/runtime/mcentral.go

    	}
    	freeByteBase := s.freeindex &^ (64 - 1)
    	whichByte := freeByteBase / 8
    	// Init alloc bits cache.
    	s.refillAllocCache(whichByte)
    
    	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
    	// s.allocCache.
    	s.allocCache >>= s.freeindex % 64
    
    	return s
    }
    
    // Return span from an mcache.
    //
    // s must have a span class corresponding to this
    // mcentral and it must not be empty.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 8.1K bytes
    - Viewed (0)
  4. src/runtime/mpagecache.go

    	*c = pageCache{}
    }
    
    // allocToCache acquires a pageCachePages-aligned chunk of free pages which
    // may not be contiguous, and returns a pageCache structure which owns the
    // chunk.
    //
    // p.mheapLock must be held.
    //
    // Must run on the system stack because p.mheapLock must be held.
    //
    //go:systemstack
    func (p *pageAlloc) allocToCache() pageCache {
    	assertLockHeld(p.mheapLock)
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Apr 19 14:30:00 UTC 2023
    - 5.6K bytes
    - Viewed (0)
  5. src/runtime/mbitmap.go

    		// As each 1 in s.allocCache was encountered and used for allocation
    		// it was shifted away. At this point s.allocCache contains all 0s.
    		// Refill s.allocCache so that it corresponds
    		// to the bits at s.allocBits starting at s.freeindex.
    		whichByte := sfreeindex / 8
    		s.refillAllocCache(whichByte)
    	}
    	s.freeindex = sfreeindex
    	return result
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 60K bytes
    - Viewed (0)
  6. src/runtime/mpagecache_test.go

    		v := v
    		t.Run(name, func(t *testing.T) {
    			b := NewPageAlloc(v.beforeAlloc, v.beforeScav)
    			defer FreePageAlloc(b)
    
    			for _, expect := range v.hits {
    				checkPageCache(t, b.AllocToCache(), expect)
    				if t.Failed() {
    					return
    				}
    			}
    			want := NewPageAlloc(v.afterAlloc, v.afterScav)
    			defer FreePageAlloc(want)
    
    			checkPageAlloc(t, want, b)
    		})
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Dec 06 19:16:48 UTC 2021
    - 10.8K bytes
    - Viewed (0)
  7. src/runtime/mheap.go

    	freeIndexForScan uint16
    
    	// Cache of the allocBits at freeindex. allocCache is shifted
    	// such that the lowest bit corresponds to the bit freeindex.
    	// allocCache holds the complement of allocBits, thus allowing
    	// ctz (count trailing zero) to use it directly.
    	// allocCache may contain bits beyond s.nelems; the caller must ignore
    	// these.
    	allocCache uint64
    
    	// allocBits and gcmarkBits hold pointers to a span's mark and
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
    - Viewed (0)
  8. src/runtime/proc.go

    			if mcache0 == nil {
    				throw("missing mcache?")
    			}
    			// Use the bootstrap mcache0. Only one P will get
    			// mcache0: the one with ID 0.
    			pp.mcache = mcache0
    		} else {
    			pp.mcache = allocmcache()
    		}
    	}
    	if raceenabled && pp.raceprocctx == 0 {
    		if id == 0 {
    			pp.raceprocctx = raceprocctx0
    			raceprocctx0 = 0 // bootstrap
    		} else {
    			pp.raceprocctx = raceproccreate()
    		}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 207.5K bytes
    - Viewed (0)
  9. src/runtime/export_test.go

    		unlock(pp.mheapLock)
    	})
    	return addr, scav
    }
    func (p *PageAlloc) AllocToCache() PageCache {
    	pp := (*pageAlloc)(p)
    
    	var c PageCache
    	systemstack(func() {
    		// None of the tests need any higher-level locking, so we just
    		// take the lock internally.
    		lock(pp.mheapLock)
    		c = PageCache(pp.allocToCache())
    		unlock(pp.mheapLock)
    	})
    	return c
    }
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:50:53 UTC 2024
    - 46.1K bytes
    - Viewed (0)
Back to top