- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 179 for allocators_ (0.34 sec)
-
tensorflow/compiler/jit/xla_device.cc
} Status status; if (alloc_attrs.on_host()) { *tensor = parsed; } else { Allocator* allocator; { mutex_lock lock(mu_); allocator = GetAllocatorLocked(alloc_attrs); } Tensor copy(allocator, parsed.dtype(), parsed.shape()); TF_RETURN_IF_ERROR( device_context->CopyCPUTensorToDeviceSync(&parsed, this, &copy)); *tensor = copy;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/allocators.go
} type derived struct { name string // name for alloc/free functions typ string // the type they return/accept base string // underlying allocator } func genAllocators() { allocators := []allocator{ { name: "ValueSlice", typ: "[]*Value", capacity: "cap(%s)", mak: "make([]*Value, %s)", resize: "%s[:%s]",
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Nov 17 23:34:11 UTC 2023 - 6.7K bytes - Viewed (0) -
staging/src/k8s.io/apimachinery/pkg/runtime/allocator.go
var AllocatorPool = sync.Pool{ New: func() interface{} { return &Allocator{} }, } // Allocator knows how to allocate memory // It exists to make the cost of object serialization cheaper. // In some cases, it allows for allocating memory only once and then reusing it. // This approach puts less load on GC and leads to less fragmented memory in general. type Allocator struct { buf []byte } var _ MemoryAllocator = &Allocator{}
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Jul 27 03:17:50 UTC 2022 - 2.2K bytes - Viewed (0) -
pkg/registry/core/service/portallocator/allocator.go
} return a, err } // NewInMemory creates an in-memory allocator. func NewInMemory(pr net.PortRange) (*PortAllocator, error) { return New(pr, func(max int, rangeSpec string, offset int) (allocator.Interface, error) { return allocator.NewAllocationMapWithOffset(max, rangeSpec, offset), nil }) } // NewFromSnapshot allocates a PortAllocator and initializes it from a snapshot.
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed May 08 07:15:02 UTC 2024 - 7.4K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/allocators.go
// Code generated from _gen/allocators.go using 'go generate'; DO NOT EDIT. package ssa import ( "internal/unsafeheader" "math/bits" "sync" "unsafe" ) var poolFreeValueSlice [27]sync.Pool func (c *Cache) allocValueSlice(n int) []*Value { var s []*Value n2 := n if n2 < 32 { n2 = 32 } b := bits.Len(uint(n2 - 1)) v := poolFreeValueSlice[b-5].Get() if v == nil { s = make([]*Value, 1<<b)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Feb 15 23:00:54 UTC 2023 - 7.4K bytes - Viewed (0) -
pkg/registry/core/service/ipallocator/cidrallocator.go
// MetaAllocator maintains a Tree with the ServiceCIDRs containing an IP Allocator // on the nodes. Since each allocator doesn't store the IPAddresses because it reads // them from the informer cache, it is cheap to create and delete IP Allocators. // MetaAllocator forwards the request to any of the internal allocators that has free // addresses. // MetaAllocator implements current allocator interface using // ServiceCIDR and IPAddress API objects.
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 13.2K bytes - Viewed (0) -
pkg/controller/nodeipam/ipam/doc.go
*/ // Package ipam provides different allocators for assigning IP ranges to nodes. // We currently support several kinds of IPAM allocators (these are denoted by // the CIDRAllocatorType): // - RangeAllocator is an allocator that assigns PodCIDRs to nodes and works // in conjunction with the RouteController to configure the network to get // connectivity. // - CloudAllocator is an allocator that synchronizes PodCIDRs from IP
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Jul 26 17:14:05 UTC 2022 - 1.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.h
XlaTensorBuffer(const void* ptr, size_t expected_size, size_t actual_size, Allocator* allocator) : TensorBuffer(const_cast<void*>(ptr)), expected_size_(expected_size), actual_size_(actual_size), allocator_(allocator) {} ~XlaTensorBuffer() override { if (data()) { allocator_->DeallocateRaw(data()); } }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 11.8K bytes - Viewed (0) -
pkg/registry/core/service/storage/alloc.go
return allocated, err } func (al *Allocators) allocIPs(service *api.Service, toAlloc map[api.IPFamily]string, dryRun bool) (map[api.IPFamily]string, error) { allocated := make(map[api.IPFamily]string) for family, ip := range toAlloc { allocator := al.serviceIPAllocatorsByFamily[family] // should always be there, as we pre validate if dryRun { allocator = allocator.DryRun() } if ip == "" {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Oct 31 21:05:05 UTC 2023 - 37.3K bytes - Viewed (0) -
pkg/registry/core/rest/storage_core.go
primaryClusterIPAllocator, err = ipallocator.New(&serviceClusterIPRange, func(max int, rangeSpec string, offset int) (allocator.Interface, error) { var mem allocator.Snapshottable mem = allocator.NewAllocationMapWithOffset(max, rangeSpec, offset) // TODO etcdallocator package to return a storage interface via the storageFactory
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Oct 31 21:05:05 UTC 2023 - 19.1K bytes - Viewed (0)