Results 1 - 10 of 316 for "sweeping" (0.29 sec)

  1. src/runtime/mgcsweep.go

    	return nil
    }
    
    const sweepDrainedMask = 1 << 31
    
    // activeSweep is a type that captures whether sweeping
    // is done, and whether there are any outstanding sweepers.
    //
    // Every potential sweeper must call begin() before they look
    // for work, and end() after they've finished sweeping.
    type activeSweep struct {
    	// state is divided into two parts.
    	//
    Last Modified: Wed May 08 17:52:18 UTC 2024 - 32.9K bytes
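
    The state word described here packs a "sweep drained" flag into the top
    bit (sweepDrainedMask) and a count of outstanding sweepers into the rest,
    so both can be checked in one atomic read. A minimal sketch of that
    pattern, using illustrative names (sweepState, drainedMask), not the
    runtime's own code:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    const drainedMask = 1 << 31 // top bit: no more sweep work to find

    // sweepState packs a "drained" flag and an active-sweeper count
    // into one word so both can be read together atomically.
    type sweepState struct{ state atomic.Uint32 }

    // begin registers a sweeper; it fails once the queue is drained,
    // which is how no new sweepers can start after sweeping is done.
    func (s *sweepState) begin() bool {
        for {
            old := s.state.Load()
            if old&drainedMask != 0 {
                return false // drained: no work left to look for
            }
            if s.state.CompareAndSwap(old, old+1) {
                return true
            }
        }
    }

    // end deregisters a sweeper.
    func (s *sweepState) end() { s.state.Add(^uint32(0)) } // i.e. -1

    func main() {
        var s sweepState
        fmt.Println(s.begin()) // true: not drained yet
        s.end()
        s.state.Store(drainedMask)
        fmt.Println(s.begin()) // false: sweeping is done
    }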
  2. src/runtime/mcentral.go

    		// Set sweepgen to indicate it's not cached but needs
    		// sweeping and can't be allocated from. sweep will
    		// set s.sweepgen to indicate s is swept.
    		atomic.Store(&s.sweepgen, sg-1)
    	} else {
    		// Indicate that s is no longer cached.
    		atomic.Store(&s.sweepgen, sg)
    	}
    
    	// Put the span in the appropriate place.
    	if stale {
    		// It's stale, so just sweep it. Sweeping will put it on
    		// the right list.
    		//
    Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 8.1K bytes
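
    The sg-1/sg arithmetic follows the sweepgen encoding documented on mspan
    in mheap.go: the heap-global sweepgen advances by 2 each GC cycle, and a
    span's own sweepgen, read relative to it, gives the span's sweep status.
    A sketch of the decoding (a paraphrase of that comment, not runtime code):

    package main

    import "fmt"

    // spanSweepState decodes a span's sweepgen relative to the heap's
    // sweepgen, which advances by 2 each GC cycle. The offsets follow
    // the mspan comment in mheap.go.
    func spanSweepState(spanGen, heapGen uint32) string {
        switch spanGen {
        case heapGen - 2:
            return "unswept: needs sweeping"
        case heapGen - 1:
            return "being swept by some sweeper"
        case heapGen:
            return "swept, ready to allocate from"
        case heapGen + 1:
            return "cached before sweep began, needs sweeping"
        case heapGen + 3:
            return "swept, then cached"
        }
        return "unknown"
    }

    func main() {
        const heapGen = 10
        fmt.Println(spanSweepState(heapGen-1, heapGen)) // being swept
        fmt.Println(spanSweepState(heapGen+3, heapGen)) // swept, then cached
    }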
  3. src/runtime/mgc.go

    	// we can go ahead and publish the heap profile.
    	//
    	// First, wait for sweeping to finish. (We know there are no
    	// more spans on the sweep queue, but we may be concurrently
    	// sweeping spans, so we have to wait.)
    	for work.cycles.Load() == n+1 && !isSweepDone() {
    		Gosched()
    	}
    
    	// Now we're really done with sweeping, so we can publish the
    	// stable heap profile. Only do this if we haven't already hit
    Last Modified: Wed May 29 16:25:21 UTC 2024 - 62K bytes
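
    The wait here is a cooperative spin: yield with Gosched until sweeping is
    observed done. The same yield-until-condition shape works in ordinary Go
    code; a minimal example, with an illustrative done flag standing in for
    isSweepDone:

    package main

    import (
        "runtime"
        "sync/atomic"
    )

    // waitUntil yields rather than busy-spins, so the goroutines that
    // will eventually set done can make progress.
    func waitUntil(done *atomic.Bool) {
        for !done.Load() {
            runtime.Gosched()
        }
    }

    func main() {
        var done atomic.Bool
        go func() { done.Store(true) }()
        waitUntil(&done)
    }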
  4. src/runtime/mheap.go

    // constrained as follows:
    //
    //   - A span may transition from free to in-use or manual during any GC
    //     phase.
    //
    //   - During sweeping (gcphase == _GCoff), a span may transition from
    //     in-use to free (as a result of sweeping) or manual to free (as a
    //     result of stacks being freed).
    //
    //   - During GC (gcphase != _GCoff), a span *must not* transition from
    Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes
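
    These bullets define a small state machine whose legal transitions depend
    on the GC phase. A sketch of the quoted rules as a predicate, with
    illustrative names (spanState, gcOff) rather than the runtime's internal
    types:

    package main

    import "fmt"

    type spanState int

    const (
        spanFree spanState = iota
        spanInUse
        spanManual
    )

    // mayTransition reports whether a span state change is permitted
    // under the constraints quoted above: a span may leave the free
    // state during any phase, but may only *become* free while the
    // runtime is sweeping (gcphase == _GCoff, here the gcOff flag).
    func mayTransition(from, to spanState, gcOff bool) bool {
        switch {
        case from == spanFree:
            return to == spanInUse || to == spanManual
        case to == spanFree:
            return gcOff
        }
        return false
    }

    func main() {
        fmt.Println(mayTransition(spanInUse, spanFree, true))  // true: swept to free
        fmt.Println(mayTransition(spanInUse, spanFree, false)) // false: GC is marking
    }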
  5. src/runtime/pinner.go

    		}
    		// This is a linker-allocated, zero size object or other object,
    		// nothing to do, silently ignore it.
    		return false
    	}
    
    	// ensure that the span is swept, b/c sweeping accesses the specials list
    	// w/o locks.
    	mp := acquirem()
    	span.ensureSwept()
    	KeepAlive(ptr) // make sure ptr is still alive after span is swept
    
    	objIndex := span.objIndex(uintptr(ptr))
    
    Last Modified: Thu Apr 04 14:29:45 UTC 2024 - 11K bytes
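
    The KeepAlive call above is the public runtime.KeepAlive API, which keeps
    ptr reachable up to that point. The same idiom protects any value whose
    finalizer could otherwise run while its underlying resource is still in
    use; the classic file-descriptor example (Unix-only, since it uses
    syscall.Read):

    package main

    import (
        "os"
        "runtime"
        "syscall"
    )

    func main() {
        f, err := os.Open("/dev/zero")
        if err != nil {
            return
        }
        buf := make([]byte, 8)
        // Use the raw descriptor directly. Without KeepAlive, f's
        // finalizer could run (closing the fd) once the last syntactic
        // use of f is past, even while Read is still in flight.
        syscall.Read(int(f.Fd()), buf)
        runtime.KeepAlive(f)
    }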
  6. src/runtime/mcache.go

    	if s == nil {
    		throw("out of memory")
    	}
    
    	if s.allocCount == s.nelems {
    		throw("span has no free space")
    	}
    
    	// Indicate that this span is cached and prevent asynchronous
    	// sweeping in the next sweep phase.
    	s.sweepgen = mheap_.sweepgen + 3
    
    	// Store the current alloc count for accounting later.
    	s.allocCountBeforeCache = s.allocCount
    
    	// Update heapLive and flush scanAlloc.
    	//
    Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 10K bytes
  7. src/runtime/stack.go

    	}
    	x.ptr().next = s.manualFreeList
    	s.manualFreeList = x
    	s.allocCount--
    	if gcphase == _GCoff && s.allocCount == 0 {
    		// Span is completely free. Return it to the heap
    		// immediately if we're sweeping.
    		//
    		// If GC is active, we delay the free until the end of
    		// GC to avoid the following type of situation:
    		//
    		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
    Last Modified: Wed May 22 22:31:00 UTC 2024 - 41.1K bytes
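
    The comment sketches a defer-the-free protocol: while GC is active, a
    fully free span is parked rather than returned to the heap, so a
    concurrent scanner never sees its memory recycled underneath it. A toy
    version of that shape, with illustrative names; the real runtime parks
    spans on a list, not closures:

    package main

    import "fmt"

    // freeDeferrer queues frees that arrive while GC is active and
    // flushes them once GC finishes.
    type freeDeferrer struct {
        gcActive bool
        deferred []func()
    }

    func (d *freeDeferrer) free(release func()) {
        if d.gcActive {
            // Freeing now could recycle memory a concurrent scanner
            // still expects to see; hold the release until GC ends.
            d.deferred = append(d.deferred, release)
            return
        }
        release()
    }

    func (d *freeDeferrer) gcDone() {
        d.gcActive = false
        for _, release := range d.deferred {
            release()
        }
        d.deferred = nil
    }

    func main() {
        d := &freeDeferrer{gcActive: true}
        d.free(func() { fmt.Println("freed") }) // queued, not run
        d.gcDone()                              // prints "freed"
    }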
  8. src/runtime/mstats.go

    	// well as unreachable objects that the garbage collector has
    	// not yet freed. Specifically, HeapAlloc increases as heap
    	// objects are allocated and decreases as the heap is swept
    	// and unreachable objects are freed. Sweeping occurs
    	// incrementally between GC cycles, so these two processes
    	// occur simultaneously, and as a result HeapAlloc tends to
    	// change smoothly (in contrast with the sawtooth that is
    Last Modified: Mon Apr 08 21:03:13 UTC 2024 - 34.2K bytes
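
    HeapAlloc is readable from user code via runtime.ReadMemStats, so the
    smooth rise and fall the comment describes can be observed directly:

    package main

    import (
        "fmt"
        "runtime"
    )

    var sink []byte // package-level so the allocation isn't optimized away

    func main() {
        var m runtime.MemStats
        runtime.ReadMemStats(&m)
        fmt.Printf("HeapAlloc before: %d\n", m.HeapAlloc)

        sink = make([]byte, 10<<20) // ~10 MiB of live heap

        runtime.ReadMemStats(&m)
        fmt.Printf("HeapAlloc after:  %d\n", m.HeapAlloc)
    }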
  9. src/runtime/mprof.go

    	// is required to obtain a consistent picture of mallocs and frees
    	// for some point in time.
    	// The problem is that mallocs come in real time, while frees
    	// come only after a GC during concurrent sweeping. So if we would
    	// naively count them, we would get a skew toward mallocs.
    	//
    	// Hence, we delay information to get consistent snapshots as
    	// of mark termination. Allocations count toward the next mark
    Last Modified: Thu May 30 17:57:37 UTC 2024 - 53.3K bytes
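
    A practical consequence of this delayed accounting: a heap profile
    reflects the state as of the most recent mark termination. Forcing a GC
    immediately before capturing, as the runtime/pprof documentation
    suggests, yields an up-to-date snapshot:

    package main

    import (
        "os"
        "runtime"
        "runtime/pprof"
    )

    func main() {
        f, err := os.Create("heap.pprof")
        if err != nil {
            return
        }
        defer f.Close()

        // Force a GC so the profile includes frees up to the latest
        // mark termination, per the accounting described above.
        runtime.GC()
        pprof.WriteHeapProfile(f)
    }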
  10. src/runtime/heapdump.go

    			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
    		}
    	}
    }
    
    var dumphdr = []byte("go1.7 heap dump\n")
    
    func mdump(m *MemStats) {
    	assertWorldStopped()
    
    	// make sure we're done sweeping
    	for _, s := range mheap_.allspans {
    		if s.state.get() == mSpanInUse {
    			s.ensureSwept()
    		}
    	}
    	memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
    Last Modified: Tue Apr 09 04:07:57 UTC 2024 - 17.6K bytes
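
    mdump sits behind the public runtime/debug.WriteHeapDump entry point,
    which stops the world (hence the assertWorldStopped and ensureSwept loop
    above) before writing a dump beginning with the header shown:

    package main

    import (
        "os"
        "runtime/debug"
    )

    func main() {
        f, err := os.Create("heap.dump")
        if err != nil {
            return
        }
        defer f.Close()

        // WriteHeapDump stops the world while it writes, so expect a pause.
        debug.WriteHeapDump(f.Fd())
    }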