Results 1 - 10 of 27 for nalloc (0.36 sec)

  1. src/runtime/malloc.go

    			minSizeForMallocHeaderIsSizeClass = true
    			break
    		}
    	}
    	if !minSizeForMallocHeaderIsSizeClass {
    		throw("min size of malloc header is not a size class boundary")
    	}
    	// Check that the pointer bitmap for all small sizes without a malloc header
    	// fits in a word.
    	if minSizeForMallocHeader/goarch.PtrSize > 8*goarch.PtrSize {
    		throw("max pointer/scan bitmap size for headerless objects is too large")

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 59.6K bytes
    - Viewed (0)
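
    The two throws in the malloc.go excerpt above guard startup invariants. As a sketch of the arithmetic behind the second check only (not runtime code), assuming the 64-bit values goarch.PtrSize = 8 and the minSizeForMallocHeader definition in mbitmap.go:

    package main

    import "fmt"

    func main() {
    	// Assumed 64-bit values: goarch.PtrSize is 8 bytes, and
    	// minSizeForMallocHeader works out to 8*8*8 = 512 bytes.
    	const ptrSize = 8
    	const minSizeForMallocHeader = 8 * ptrSize * ptrSize

    	// A headerless object of the maximum size covers this many
    	// pointer-sized words, each needing one bit in the scan bitmap.
    	words := minSizeForMallocHeader / ptrSize // 64

    	// The runtime check requires that bitmap to fit in one word:
    	// 8*ptrSize = 64 bits on a 64-bit platform.
    	fmt.Println(words <= 8*ptrSize) // true
    }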
  2. src/runtime/mprof.go

    	// The following complex 3-stage scheme of stats accumulation
    	// is required to obtain a consistent picture of mallocs and frees
    	// for some point in time.
    	// The problem is that mallocs come in real time, while frees
    	// come only after a GC during concurrent sweeping. So if we would
    	// naively count them, we would get a skew toward mallocs.
    	//
    	// Hence, we delay information to get consistent snapshots as

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 53.3K bytes
    - Viewed (0)
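
    A toy model may make the mprof.go comment concrete. This is not the runtime's code (the real scheme has three stages and is driven by the GC); it only shows why staging mallocs until the matching frees arrive yields consistent snapshots:

    package main

    import "fmt"

    // profRecord stages counts in "recent" and publishes them only once the
    // frees from the same cycle are known, so readers never observe mallocs
    // without their corresponding frees.
    type profRecord struct {
    	activeMallocs, activeFrees uint64 // published, mutually consistent
    	recentMallocs, recentFrees uint64 // staged until the cycle completes
    }

    func (r *profRecord) malloc() { r.recentMallocs++ }
    func (r *profRecord) free()   { r.recentFrees++ }

    // endCycle publishes the staged counts; in the runtime this is driven by
    // the GC and uses three stages rather than the two modeled here.
    func (r *profRecord) endCycle() {
    	r.activeMallocs += r.recentMallocs
    	r.activeFrees += r.recentFrees
    	r.recentMallocs, r.recentFrees = 0, 0
    }

    func main() {
    	var r profRecord
    	r.malloc()
    	r.malloc()
    	r.free()
    	fmt.Println(r.activeMallocs, r.activeFrees) // 0 0: mid-cycle, nothing published
    	r.endCycle()
    	fmt.Println(r.activeMallocs, r.activeFrees) // 2 1: a consistent snapshot
    }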
  3. src/runtime/mheap.go

    func (s spanAllocType) manual() bool {
    	return s != spanAllocHeap
    }
    
    // alloc allocates a new span of npage pages from the GC'd heap.
    //
    // spanclass indicates the span's size class and scannability.
    //
    // Returns a span that has been fully initialized. span.needzero indicates
    // whether the span has been zeroed. Note that it may not be.
    func (h *mheap) alloc(npages uintptr, spanclass spanClass) *mspan {

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
    - Viewed (0)
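
    A sketch of the caller-side contract implied by that doc comment, with a hypothetical span type standing in for the runtime's mspan (the real code clears memory with memclrNoHeapPointers):

    package main

    import "fmt"

    // span is a hypothetical stand-in for mspan: mem models the span's pages
    // and needzero mirrors the flag described above.
    type span struct {
    	mem      []byte
    	needzero bool
    }

    // zeroIfNeeded honors the contract: alloc returns a fully initialized
    // span, but its memory is guaranteed clean only after needzero is checked.
    func (s *span) zeroIfNeeded() {
    	if s.needzero {
    		clear(s.mem) // stands in for memclrNoHeapPointers
    		s.needzero = false
    	}
    }

    func main() {
    	s := &span{mem: []byte{0xde, 0xad}, needzero: true}
    	s.zeroIfNeeded()
    	fmt.Println(s.mem) // [0 0]
    }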
  4. tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc

        return GetInverseScalesConstantOp().getValue();
      }
    
      func::CallOp GetCallOp() { return call_op_; }
    
      FlatSymbolRefAttr GetFunction() { return call_op_.getCalleeAttr(); }
    
     private:
      explicit UniformQuantizeFunctionCallPattern(func::CallOp call_op)
          : call_op_(call_op) {}
    
      func::CallOp call_op_;
    };
    
    // Matches the following pattern that represents uniform dequantization.
    //

    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 64.6K bytes
    - Viewed (0)
  5. src/cmd/cgo/out.go

    const char *_cgoPREFIX_Cfunc_CString(struct __go_string s) {
    	char *p = malloc(s.__length+1);
    	if(p == NULL)
    		runtime_throw("runtime: C malloc failed");
    	memmove(p, s.__data, s.__length);
    	p[s.__length] = 0;
    	return p;
    }
    
    void *_cgoPREFIX_Cfunc_CBytes(struct __go_open_array b) {
    	char *p = malloc(b.__count);
    	if(p == NULL)
    		runtime_throw("runtime: C malloc failed");
    	memmove(p, b.__values, b.__count);
    	return p;

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Mar 29 16:41:10 UTC 2024
    - 59.6K bytes
    - Viewed (0)
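
    These are the C helpers behind C.CString and C.CBytes in cgo-generated code. The documented cgo contract on the Go side is that the memory comes from C malloc and the caller must free it; a minimal usage example:

    package main

    /*
    #include <stdlib.h>
    */
    import "C"

    import (
    	"fmt"
    	"unsafe"
    )

    func main() {
    	// C.CString invokes the helper above: it mallocs len(s)+1 bytes in
    	// the C heap and copies the string in, so the Go caller owns it.
    	cs := C.CString("hello")
    	defer C.free(unsafe.Pointer(cs))

    	fmt.Println(C.GoString(cs)) // hello
    }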
  6. src/runtime/mgcscavenge.go

    // the allocation is part of a bigger one and it's probably not worth
    // eagerly collapsing).
    //
    // alloc may only run concurrently with find.
    func (s *scavengeIndex) alloc(ci chunkIdx, npages uint) {
    	sc := s.chunks[ci].load()
    	sc.alloc(npages, s.gen)
    	// TODO(mknyszek): Consider eagerly backing memory with huge pages
    	// here and track whether we believe this chunk is backed by huge pages.

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:48:45 UTC 2024
    - 52.3K bytes
    - Viewed (0)
  7. src/runtime/mbitmap.go

    	"runtime/internal/sys"
    	"unsafe"
    )
    
    const (
    	// A malloc header is functionally a single type pointer, but
    	// we need to use 8 here to ensure 8-byte alignment of allocations
    	// on 32-bit platforms. It's wasteful, but a lot of code relies on
    	// 8-byte alignment for 8-byte atomics.
    	mallocHeaderSize = 8
    
    	// The minimum object size that has a malloc header, exclusive.
    	//

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 60K bytes
    - Viewed (0)
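
    The alignment concern in that comment is the one documented in sync/atomic: on 32-bit platforms, 64-bit atomic operations require 8-byte alignment, and the first word of an allocated struct can be relied upon to have it. A small example of code depending on that guarantee:

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // counter keeps its uint64 first: on 32-bit platforms the first word of
    // an allocated struct is 8-byte aligned, which 64-bit atomics require.
    type counter struct {
    	n    uint64
    	name string
    }

    func main() {
    	c := &counter{name: "requests"}
    	atomic.AddUint64(&c.n, 1)
    	fmt.Println(atomic.LoadUint64(&c.n)) // 1
    }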
  8. src/time/time_test.go

    func TestCountMallocs(t *testing.T) {
    	if testing.Short() {
    		t.Skip("skipping malloc count in short mode")
    	}
    	if runtime.GOMAXPROCS(0) > 1 {
    		t.Skip("skipping; GOMAXPROCS>1")
    	}
    	for _, mt := range mallocTest {
    		allocs := int(testing.AllocsPerRun(100, mt.fn))
    		if allocs > mt.count {
    			t.Errorf("%s: %d allocs, want %d", mt.desc, allocs, mt.count)
    		}
    	}
    }
    
    func TestLoadFixed(t *testing.T) {

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 03:13:47 UTC 2024
    - 56.5K bytes
    - Viewed (0)
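
    testing.AllocsPerRun is public API: it averages the number of heap allocations over repeated calls of the supplied function. A minimal standalone test in the same style as the one above (the package name and the asserted bound are illustrative):

    package mypkg_test

    import (
    	"strconv"
    	"testing"
    )

    func TestItoaAllocs(t *testing.T) {
    	// AllocsPerRun reports the mean allocations per call of the closure.
    	allocs := testing.AllocsPerRun(100, func() {
    		_ = strconv.Itoa(123456)
    	})
    	if allocs > 1 {
    		t.Errorf("strconv.Itoa: %v allocs per call, want at most 1", allocs)
    	}
    }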
  9. src/net/netip/netip_test.go

    				// Optimizations are required to remove some allocs.
    				t.Skipf("skipping on %v", testenv.Builder())
    			}
    			allocs := int(testing.AllocsPerRun(1000, func() {
    				sinkString = tc.ip.String()
    			}))
    			if allocs != tc.wantAllocs {
    				t.Errorf("allocs=%d, want %d", allocs, tc.wantAllocs)
    			}
    		})
    	}
    }
    
    func TestPrefixString(t *testing.T) {

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Jun 04 17:10:01 UTC 2024
    - 54.3K bytes
    - Viewed (0)
  10. src/bytes/bytes_test.go

    func TestEqual(t *testing.T) {
    	// Run the tests and check for allocation at the same time.
    	allocs := testing.AllocsPerRun(10, func() {
    		for _, tt := range compareTests {
    			eql := Equal(tt.a, tt.b)
    			if eql != (tt.i == 0) {
    				t.Errorf(`Equal(%q, %q) = %v`, tt.a, tt.b, eql)
    			}
    		}
    	})
    	if allocs > 0 {
    		t.Errorf("Equal allocated %v times", allocs)
    	}
    }
    
    func TestEqualExhaustive(t *testing.T) {
    	var size = 128

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 03 12:58:37 UTC 2024
    - 56.5K bytes
    - Viewed (0)