Results 1 - 10 of 86 for Allocations (0.14 sec)

  1. src/runtime/mprof.go

    //
    // The returned profile may be up to two garbage collection cycles old.
    // This is to avoid skewing the profile toward allocations; because
    // allocations happen in real time but frees are delayed until the garbage
    // collector performs sweeping, the profile only accounts for allocations
    // that have had a chance to be freed by the garbage collector.
    //
    // Most clients should use the runtime/pprof package or
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 53.3K bytes
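
    The mprof.go comment above ends by recommending the runtime/pprof package
    over the low-level profiling hooks. A minimal sketch of dumping the heap
    profile that way, assuming heap.pprof is an acceptable output path; the
    explicit runtime.GC call is a common way to compensate for the profile
    lagging by up to two collection cycles:

    package main

    import (
        "log"
        "os"
        "runtime"
        "runtime/pprof"
    )

    func main() {
        // Force a collection so recently freed objects are reflected in the
        // profile, which otherwise lags by up to two GC cycles.
        runtime.GC()

        f, err := os.Create("heap.pprof")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        // "heap" is the built-in profile backed by the data in mprof.go.
        if err := pprof.Lookup("heap").WriteTo(f, 0); err != nil {
            log.Fatal(err)
        }
    }
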
  2. src/runtime/malloc.go

    	// arenaL2Bits is the number of bits of the arena number
    	// covered by the second level arena index.
    	//
    	// The size of each arena map allocation is proportional to
    	// 1<<arenaL2Bits, so it's important that this not be too
    	// large. 48 bits leads to 32MB arena index allocations, which
    	// is about the practical threshold.
    	arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 59.6K bytes
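
    The "48 bits leads to 32MB arena index allocations" figure in the
    malloc.go excerpt can be checked by hand. A sketch using the linux/amd64
    constants as of recent Go versions (heapAddrBits = 48, logHeapArenaBytes
    = 26 for 64 MiB arenas, arenaL1Bits = 0), copied out here since they are
    not importable:

    package main

    import "fmt"

    func main() {
        const (
            heapAddrBits      = 48 // linux/amd64 address space
            logHeapArenaBytes = 26 // 64 MiB heap arenas
            arenaL1Bits       = 0  // no first-level index on linux/amd64
            ptrBytes          = 8  // size of one *heapArena map entry
        )
        arenaL2Bits := heapAddrBits - logHeapArenaBytes - arenaL1Bits
        entries := 1 << arenaL2Bits // 1<<22 = 4,194,304 entries
        fmt.Printf("%d MiB\n", (entries*ptrBytes)>>20) // prints "32 MiB"
    }
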
  3. src/runtime/mheap.go

    	// allocations. Where the page size is less than the physical page
    	// size, we already manage to do this by default.
    	needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize
    
    	// If the allocation is small enough, try the page cache!
    	// The page cache does not support aligned allocations, so we cannot use
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
  4. src/runtime/mgc.go

    	// allocations are blocked until assists can
    	// happen, we want to enable assists as early as
    	// possible.
    	setGCPhase(_GCmark)
    
    	gcBgMarkPrepare() // Must happen before assists are enabled.
    	gcMarkRootPrepare()
    
    	// Mark all active tinyalloc blocks. Since we're
    	// allocating from these, they need to be black like
    	// other allocations. The alternative is to blacken
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 16:25:21 UTC 2024
    - 62K bytes
  5. src/runtime/mgcpacer.go

    	// and unscavenged memory, pushing the goal down significantly.
    	//
    	// heapFree is also safe to exclude from the memory limit because in the steady-state, it's
    	// just a pool of memory for future heap allocations, and making new allocations from heapFree
    	// memory doesn't increase overall memory use. In transient states, the scavenger and the
    	// allocator actively manage the pool of heapFree memory to maintain the memory limit.
    	//
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 55.4K bytes
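
    The memory limit discussed in the mgcpacer.go excerpt is the soft limit
    configured with the GOMEMLIMIT environment variable or, equivalently,
    runtime/debug.SetMemoryLimit. A minimal sketch; the 512 MiB figure is an
    arbitrary example value:

    package main

    import (
        "fmt"
        "runtime/debug"
    )

    func main() {
        // Set a 512 MiB soft memory limit; the previous limit is returned
        // (math.MaxInt64 when none was set).
        prev := debug.SetMemoryLimit(512 << 20)
        fmt.Println("previous limit:", prev)
    }
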
  6. src/runtime/mbitmap.go

    // The caller is also responsible for cgo pointer checks if this
    // may be writing Go pointers into non-Go memory.
    //
    // Pointer data is not maintained for allocations containing
    // no pointers at all; any caller of bulkBarrierPreWrite must first
    // make sure the underlying allocation contains pointers, usually
    // by checking typ.PtrBytes.
    //
    // The typ argument is the type of the space at src and dst (and the
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 60K bytes
  7. staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go

    	deadline, _ := ctx.Deadline()
    
    	identifier := fmt.Sprintf("key: %q, labels: %q, fields: %q", key, pred.Label, pred.Field)
    
    	// Create a watcher here to reduce memory allocations under lock,
    	// given that memory allocation may trigger GC and block the thread.
    	// Also note that emptyFunc is a placeholder, until we will be able
    	// to compute watcher.forget function (which has to happen under lock).
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed Jun 12 10:12:02 UTC 2024
    - 51.8K bytes
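
    The cacher.go excerpt creates its watcher before taking the lock so the
    allocation, and any GC work it triggers, stays out of the critical
    section. A generic sketch of that pattern; the registry and watcher types
    here are hypothetical stand-ins, not the Kubernetes ones:

    package main

    import "sync"

    type watcher struct {
        events chan int
    }

    type registry struct {
        mu       sync.Mutex
        watchers []*watcher
    }

    func (r *registry) addWatcher() *watcher {
        // Allocate while no lock is held; a GC triggered here cannot
        // lengthen anyone else's critical section.
        w := &watcher{events: make(chan int, 16)}

        r.mu.Lock()
        defer r.mu.Unlock()
        // Only the cheap publish step runs under the lock.
        r.watchers = append(r.watchers, w)
        return w
    }

    func main() {
        r := &registry{}
        _ = r.addWatcher()
    }
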
  8. src/math/big/int_test.go

    		x := NewInt(3)
    		got := testing.AllocsPerRun(100, func() {
    			// NewInt should inline, and all its allocations
    			// can happen on the stack. Passing the result of NewInt
    			// to Add should not cause any of those allocations to escape.
    			x.Add(x, NewInt(n))
    		})
    		if got != 0 {
    			t.Errorf("x.Add(x, NewInt(%d)), wanted 0 allocations, got %f", n, got)
    		}
    	}
    }
    
    func TestFloat64(t *testing.T) {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 18:42:28 UTC 2024
    - 58.5K bytes
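
    The int_test.go excerpt relies on testing.AllocsPerRun to assert that
    NewInt inlines and its result never escapes to the heap. The same helper
    works for any function that should be allocation-free; a self-contained
    example with a stand-in sum function:

    package example

    import "testing"

    func sum(xs []int) int {
        total := 0
        for _, x := range xs {
            total += x
        }
        return total
    }

    func TestSumAllocs(t *testing.T) {
        xs := []int{1, 2, 3, 4}
        // AllocsPerRun reports the average number of heap allocations per
        // call across 100 runs of the closure.
        got := testing.AllocsPerRun(100, func() {
            _ = sum(xs)
        })
        if got != 0 {
            t.Errorf("sum: got %f allocations per run, want 0", got)
        }
    }
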
  9. src/cmd/compile/internal/ssa/regalloc.go

    // TODO
    
    // Use an affinity graph to mark two values which should use the
    // same register. This affinity graph will be used to prefer certain
    // registers for allocation. This affinity helps eliminate moves that
    // are required for phi implementations and helps generate allocations
    // for 2-register architectures.
    
    // Note: regalloc generates a not-quite-SSA output. If we have:
    //
    //             b1: x = ... : AX
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Nov 21 17:49:56 UTC 2023
    - 87.2K bytes
  10. pilot/pkg/serviceregistry/serviceentry/controller_test.go

    		}
    		if v, ok := gotIPMap[svc.AutoAllocatedIPv4Address]; ok && v != svc.Hostname.String() {
    			t.Errorf("multiple allocations of same IP address to different services with different hostname: %s", svc.AutoAllocatedIPv4Address)
    		}
    		gotIPMap[svc.AutoAllocatedIPv4Address] = svc.Hostname.String()
    		// Validate that IP address is valid.
    Registered: Fri Jun 14 15:00:06 UTC 2024
    - Last Modified: Wed May 29 15:31:09 UTC 2024
    - 92.9K bytes