Results 121 - 130 of 417 for Malloc (0.17 sec)

  1. src/runtime/mpagealloc_test.go

    			nAlloc := (PallocChunkPages * 4) / int(npages)
    			for i := 0; i < nAlloc; i++ {
    				addr := PageBase(BaseChunkIdx, uint(i)*uint(npages))
    				if a, _ := b.Alloc(npages); a != addr {
    					t.Fatalf("bad alloc #%d: want 0x%x, got 0x%x", i+1, addr, a)
    				}
    			}
    
    			// Check to make sure the next allocation fails.
    			if a, _ := b.Alloc(npages); a != 0 {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Dec 06 19:16:48 UTC 2021
    - 32.6K bytes
    - Viewed (0)
  2. src/runtime/mpagecache.go

    		return c.base + i*pageSize, uintptr(scav) * pageSize
    	}
    	return c.allocN(npages)
    }
    
    // allocN is a helper which attempts to allocate npages worth of pages
    // from the cache. It represents the general case for allocating from
    // the page cache.
    //
    // Returns a base address and the amount of scavenged memory in the
    // allocated region in bytes.
    func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr) {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Apr 19 14:30:00 UTC 2023
    - 5.6K bytes
    - Viewed (0)
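
Aside: the allocN comment above describes the page cache's general allocation path: find npages consecutive free pages in a 64-page bitmap window, mark them allocated, and report how much of the returned memory had been scavenged. Below is a minimal self-contained sketch of that contract, with a simplified pageCache and an assumed 8 KiB page size (the runtime's real type, constants, and search code differ):

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    const pageSize = 8192 // assumed page size; the runtime's is platform-dependent

    // pageCache is a simplified stand-in for the runtime's per-P page
    // cache: a 64-page window described by two bitmaps, one bit per page.
    type pageCache struct {
    	base  uintptr // base address of the 64-page window
    	cache uint64  // bit i set => page i is free
    	scav  uint64  // bit i set => page i is scavenged
    }

    // allocN finds npages consecutive free pages, marks them allocated,
    // and returns their base address and the scavenged bytes among them,
    // or (0, 0) if no run of npages free pages exists.
    func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr) {
    	run := uintptr(0)
    	for i := uint(0); i < 64; i++ {
    		if c.cache&(1<<i) == 0 {
    			run = 0
    			continue
    		}
    		run++
    		if run == npages {
    			start := i + 1 - uint(npages)
    			mask := ((uint64(1) << npages) - 1) << start
    			scav := bits.OnesCount64(c.scav & mask)
    			c.cache &^= mask // pages are now allocated
    			c.scav &^= mask  // and no longer scavenged
    			return c.base + uintptr(start)*pageSize, uintptr(scav) * pageSize
    		}
    	}
    	return 0, 0
    }

    func main() {
    	c := pageCache{base: 0x100000, cache: ^uint64(0), scav: 0b1111}
    	addr, scav := c.allocN(4)
    	fmt.Printf("addr=%#x scavenged=%d bytes\n", addr, scav) // addr=0x100000 scavenged=32768 bytes
    }
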
  3. src/runtime/mpallocbits.go

    	return (*pageBits)(b).block64(i)
    }
    
    // allocPages64 allocates a 64-bit block of 64 pages aligned to 64 pages according
    // to the bits set in alloc. The block set is the one containing the i'th page.
    func (b *pallocBits) allocPages64(i uint, alloc uint64) {
    	(*pageBits)(b).setBlock64(i, alloc)
    }
    
    	// findBitRange64 returns the bit index of the first set of
    	// n consecutive 1 bits. If no consecutive set of 1 bits of
    	// size n may be found in c, then it returns an integer >= 64.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sat May 18 15:13:43 UTC 2024
    - 12.5K bytes
    - Viewed (0)
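
Aside: one way to implement the findBitRange64 described above is to repeatedly AND the word with a one-bit right shift of itself: after n-1 folds, bit i survives only if bits i through i+n-1 were all set. A sketch of that approach (the runtime's real version folds in logarithmic steps; this linear one is easier to read):

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    // findBitRange64 returns the bit index of the first run of n
    // consecutive 1 bits in c, or a value >= 64 if there is none.
    func findBitRange64(c uint64, n uint) uint {
    	// After k folds, bit i is set iff original bits i..i+k are all set.
    	for k := uint(1); k < n; k++ {
    		c &= c >> 1
    	}
    	if c == 0 {
    		return 64
    	}
    	return uint(bits.TrailingZeros64(c))
    }

    func main() {
    	// 0b11100110 has a 2-bit run at index 1 and a 3-bit run at index 5.
    	fmt.Println(findBitRange64(0b11100110, 2)) // 1
    	fmt.Println(findBitRange64(0b11100110, 3)) // 5
    	fmt.Println(findBitRange64(0b11100110, 4)) // 64 (not found)
    }
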
  4. src/runtime/metrics_test.go

    		t.Error("allocs-by-size and frees-by-size counts don't match in length")
    	} else {
    		for i := range objects.alloc.Buckets {
    			ba := objects.alloc.Buckets[i]
    			bf := objects.free.Buckets[i]
    			if ba != bf {
    				t.Errorf("bucket %d is different for alloc and free hists: %f != %f", i, ba, bf)
    			}
    		}
    		if !t.Failed() {
    			var gotAlloc, gotFree uint64
    			want := objects.total
    			for i := range objects.alloc.Counts {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:52:17 UTC 2024
    - 45K bytes
    - Viewed (0)
  5. pkg/registry/core/service/storage/alloc.go

    		if i > (len(service.Spec.ClusterIPs) - 1) {
    			service.Spec.ClusterIPs = append(service.Spec.ClusterIPs, "" /* just a marker */)
    		}
    
    		toAlloc[ipFamily] = service.Spec.ClusterIPs[i]
    	}
    
    	// allocate
    	allocated, err := al.allocIPs(service, toAlloc, dryRun)
    
    	// set if successful
    	if err == nil {
    		for family, ip := range allocated {
    			for i, check := range service.Spec.IPFamilies {
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Tue Oct 31 21:05:05 UTC 2023
    - 37.3K bytes
    - Viewed (0)
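
Aside: the fragment above is the allocate-then-commit shape of dual-stack ClusterIP assignment: pad Spec.ClusterIPs with empty-string markers until it parallels Spec.IPFamilies, hand the whole request to the allocator, and write results back only if every family succeeded. A stripped-down sketch with hypothetical stand-in types (allocIPs here is canned; the real allocator and API types are Kubernetes internals):

    package main

    import "fmt"

    type ipFamily string

    // service is a hypothetical stand-in for the relevant v1.ServiceSpec fields.
    type service struct {
    	ClusterIPs []string
    	IPFamilies []ipFamily
    }

    // allocIPs stands in for the real allocator; it is all-or-nothing.
    // An empty string requests a fresh IP for that family.
    func allocIPs(toAlloc map[ipFamily]string) (map[ipFamily]string, error) {
    	next := map[ipFamily]string{"IPv4": "10.0.0.7", "IPv6": "fd00::7"} // canned for the sketch
    	out := make(map[ipFamily]string, len(toAlloc))
    	for fam, ip := range toAlloc {
    		if ip == "" {
    			ip = next[fam]
    		}
    		out[fam] = ip
    	}
    	return out, nil
    }

    func main() {
    	svc := &service{
    		ClusterIPs: []string{"10.0.0.3"}, // no IPv6 entry yet
    		IPFamilies: []ipFamily{"IPv4", "IPv6"},
    	}

    	// Pad ClusterIPs with "" markers so it parallels IPFamilies.
    	toAlloc := map[ipFamily]string{}
    	for i, fam := range svc.IPFamilies {
    		if i > len(svc.ClusterIPs)-1 {
    			svc.ClusterIPs = append(svc.ClusterIPs, "" /* just a marker */)
    		}
    		toAlloc[fam] = svc.ClusterIPs[i]
    	}

    	// Allocate, then commit back into the spec only on success.
    	allocated, err := allocIPs(toAlloc)
    	if err == nil {
    		for fam, ip := range allocated {
    			for i, f := range svc.IPFamilies {
    				if f == fam {
    					svc.ClusterIPs[i] = ip
    				}
    			}
    		}
    	}
    	fmt.Println(svc.ClusterIPs) // [10.0.0.3 fd00::7]
    }
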
  6. tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.cc

      // We're cloning operations [remat.begin, remat.end) at position
      // remat.insert. We store changes to the alloc/dealloc sizes due to the
      // insertion in a vector `delta`: A change `c_alloc` of `operations_[i].alloc`
      // as `delta[i] += c_alloc`, and a change `c_dealloc` of
      // `operations_[i].dealloc` as `delta[i+1] -= c_dealloc`.
      std::vector<MemSpec> deltas;
    
      if (remat.begin == remat.end) {
        return deltas;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 14 20:57:44 UTC 2023
    - 13.7K bytes
    - Viewed (0)
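
Aside: the comment above records a rematerialization candidate's effect on memory as a sparse delta vector instead of rebuilding the whole profile: a change c_alloc to operations_[i].alloc becomes delta[i] += c_alloc, and a change c_dealloc to operations_[i].dealloc becomes delta[i+1] -= c_dealloc. A small Go sketch of that accounting (the original is C++; the types and values here are illustrative):

    package main

    import "fmt"

    type op struct {
    	alloc   int64 // bytes allocated before this operation runs
    	dealloc int64 // bytes freed after this operation runs
    }

    // memProfile returns live memory at each operation, optionally
    // adjusted by a delta vector: delta[i] shifts the profile from
    // operation i onward, matching the delta[i] += c_alloc /
    // delta[i+1] -= c_dealloc convention in the comment above.
    func memProfile(ops []op, delta []int64) []int64 {
    	prof := make([]int64, len(ops))
    	var live int64
    	for i, o := range ops {
    		live += o.alloc
    		if delta != nil {
    			live += delta[i]
    		}
    		prof[i] = live
    		live -= o.dealloc
    	}
    	return prof
    }

    func main() {
    	ops := []op{{100, 40}, {30, 20}, {10, 80}}
    	fmt.Println(memProfile(ops, nil)) // [100 90 80]

    	// Cloning a hypothetical op with alloc=20, dealloc=20 in front of
    	// operation 1: delta[1] += 20 and delta[1+1] -= 20.
    	delta := []int64{0, 20, -20}
    	fmt.Println(memProfile(ops, delta)) // [100 110 80]
    }
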
  7. test/codegen/alloc.go

    Iskander Sharipov <******@****.***> Fri Dec 28 18:40:04 UTC 2018 (+0300)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Feb 26 23:08:15 UTC 2019
    - 902 bytes
    - Viewed (0)
  8. src/strings/builder_test.go

    		}
    	})
    }
    
    func TestBuilderGrowSizeclasses(t *testing.T) {
    	s := Repeat("a", 19)
    	allocs := testing.AllocsPerRun(100, func() {
    		var b Builder
    		b.Grow(18)
    		b.WriteString(s)
    		_ = b.String()
    	})
    	if allocs > 1 {
    		t.Fatalf("unexpected amount of allocations: %v, want: 1", allocs)
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Feb 19 19:51:15 UTC 2024
    - 8.1K bytes
    - Viewed (0)
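
Aside: the test above pins down a size-class effect: Grow(18) requests 18 bytes, the allocator rounds the request up to the next malloc size class (24 bytes on typical 64-bit platforms), and the Builder keeps that full capacity, so a 19-byte string still fits in the single allocation. testing.AllocsPerRun also works outside tests, which makes quick experiments easy, as in this sketch (assuming the rounding behavior just described):

    package main

    import (
    	"fmt"
    	"strings"
    	"testing"
    )

    func main() {
    	s := strings.Repeat("a", 19)
    	allocs := testing.AllocsPerRun(100, func() {
    		var b strings.Builder
    		b.Grow(18)       // rounds up to the 24-byte size class
    		b.WriteString(s) // 19 bytes still fit: no second allocation
    		_ = b.String()   // Builder.String does not copy
    	})
    	fmt.Println(allocs) // 1
    }
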
  9. src/runtime/mgcsweep.go

    				s.reportZombies()
    			}
    		}
    	}
    
    	// Count the number of free objects in this span.
    	nalloc := uint16(s.countAlloc())
    	nfreed := s.allocCount - nalloc
    	if nalloc > s.allocCount {
    		// The zombie check above should have caught this in
    		// more detail.
    		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
    		throw("sweep increased allocation count")
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:52:18 UTC 2024
    - 32.9K bytes
    - Viewed (0)
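
Aside: the sweep fragment above enforces a one-way invariant: recounting a span's allocated objects from its mark bits can only yield fewer objects than the previous sweep recorded, and the difference is what was freed; a larger count means heap metadata corruption, hence the throw. A toy model of that check with a drastically simplified span (the real mspan tracks per-object alloc and mark bits across the whole span):

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    // span is a toy stand-in for the runtime's mspan.
    type span struct {
    	allocCount uint16 // allocated objects as of the previous sweep
    	markBits   uint64 // one bit per object; set = still reachable
    }

    // countAlloc recounts live objects from the mark bits.
    func (s *span) countAlloc() uint16 {
    	return uint16(bits.OnesCount64(s.markBits))
    }

    // sweep frees unmarked objects and returns how many were freed.
    func (s *span) sweep() uint16 {
    	nalloc := s.countAlloc()
    	if nalloc > s.allocCount {
    		// Mirrors the runtime's throw: an object became "allocated"
    		// without the allocator's knowledge, i.e. corruption.
    		panic("sweep increased allocation count")
    	}
    	nfreed := s.allocCount - nalloc
    	s.allocCount = nalloc
    	return nfreed
    }

    func main() {
    	s := &span{allocCount: 10, markBits: 0b1111111} // 7 of 10 still marked
    	fmt.Println("freed:", s.sweep()) // freed: 3
    }
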
  10. tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.h

                                   // to input and output tensors. This vector is
                                   // kept sorted + unique.
    
        SizeT alloc = 0;    // The number of bytes that need to be allocated before
                            // this operation.
        SizeT dealloc = 0;  // The number of bytes that can be deallocated after
                            // this operation.
      };
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 14 20:57:44 UTC 2023
    - 12K bytes
    - Viewed (0)
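
Aside: given per-operation alloc/dealloc byte counts like the fields above, the peak memory of a schedule falls out of one pass: add alloc before each operation, track the running maximum, subtract dealloc after. A short sketch (names are illustrative, not the rematerializer's API):

    package main

    import "fmt"

    type operation struct {
    	alloc   int64 // bytes that must be allocated before this operation
    	dealloc int64 // bytes that can be deallocated after this operation
    }

    // peakMemory returns the maximum number of live bytes over the schedule.
    func peakMemory(ops []operation) int64 {
    	var live, peak int64
    	for _, o := range ops {
    		live += o.alloc
    		if live > peak {
    			peak = live
    		}
    		live -= o.dealloc
    	}
    	return peak
    }

    func main() {
    	ops := []operation{{alloc: 100, dealloc: 40}, {alloc: 80, dealloc: 140}}
    	fmt.Println(peakMemory(ops)) // 140: the two working sets overlap by 60 bytes
    }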