- Sort Score
- Result 10 results
- Languages All
Results 31 - 40 of 503 for allocators (0.17 sec)
-
tensorflow/c/kernels/summary_op_test.cc
OpKernelContext::Params params; DummyDevice dummy_device(nullptr); params.device = &dummy_device; params.op_kernel = kernel.get(); AllocatorAttributes alloc_attrs; params.output_attr_array = &alloc_attrs; gtl::InlinedVector<TensorValue, 4> inputs; inputs.emplace_back(tags); inputs.emplace_back(values); params.inputs = inputs; OpKernelContext ctx(&params, 1); kernel->Compute(&ctx);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jul 18 15:10:51 UTC 2022 - 6.7K bytes - Viewed (0) -
pkg/registry/core/service/portallocator/operation.go
// It is better to leak ports than to double-allocate them, // so we allocate immediately, but defer release. // On commit we best-effort release the deferred releases. // On rollback we best-effort release any allocations we did. // // Pattern for use: // // op := StartPortAllocationOperation(...) // defer op.Finish // ... // write(updatedOwner) // ... // op.Commit() type PortAllocationOperation struct {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Jul 26 17:14:05 UTC 2022 - 4.2K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
// The platform for this device. se::Platform* const platform_; // Not owned. // Intra-op threads to spawn (from SessionOptions). const int intra_op_parallelism_threads_; // Memory allocator associated with this device. Allocator* xla_allocator_ TF_GUARDED_BY(mu_) = nullptr; // Not owned. std::unique_ptr<AsyncValueAllocator> pjrt_allocator_ TF_GUARDED_BY(mu_); // Stream associated with this device. Operations enqueued on this
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
src/runtime/internal/sys/nih.go
// // The last point is the real benefit of NotInHeap. The runtime uses // it for low-level internal structures to avoid memory barriers in the // scheduler and the memory allocator where they are illegal or simply // inefficient. This mechanism is reasonably safe and does not compromise // the readability of the runtime.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 02 18:24:50 UTC 2022 - 1.7K bytes - Viewed (0) -
pkg/registry/core/service/ipallocator/bitmap.go
} return &r, nil } // NewInMemory creates an in-memory allocator. func NewInMemory(cidr *net.IPNet) (*Range, error) { return New(cidr, func(max int, rangeSpec string, offset int) (allocator.Interface, error) { return allocator.NewAllocationMapWithOffset(max, rangeSpec, offset), nil }) } // NewFromSnapshot allocates a Range and initializes it from a snapshot.
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Jan 25 20:32:40 UTC 2023 - 10.8K bytes - Viewed (0) -
pkg/registry/core/service/ipallocator/ipallocator.go
return len(ips) } // for testing, it assumes this allocator is unique for the ipFamily func (a *Allocator) Free() int { return int(a.size) - a.Used() } // Destroy func (a *Allocator) Destroy() { } // DryRun func (a *Allocator) DryRun() Interface { return dryRunAllocator{a} } // EnableMetrics func (a *Allocator) EnableMetrics() { registerMetrics()
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Oct 31 21:05:04 UTC 2023 - 17K bytes - Viewed (0) -
src/runtime/testdata/testprog/gc.go
memLimitSink[i] = new([memLimitUnit]byte) // Write to this memory to slow down the allocator, otherwise // we get flaky behavior. See #52433. for j := range memLimitSink[i] { memLimitSink[i][j] = 9 } } // Again, Gosched to slow down the allocator. runtime.Gosched() select { case newTarget := <-target: if newTarget == math.MaxInt64 { return
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sun Oct 02 02:28:27 UTC 2022 - 12.1K bytes - Viewed (0) -
src/runtime/mcache.go
// so they are grouped here for better caching. nextSample uintptr // trigger heap sample after allocating this many bytes scanAlloc uintptr // bytes of scannable heap allocated // Allocator cache for tiny objects w/o pointers. // See "Tiny allocator" comment in malloc.go. // tiny points to the beginning of the current tiny block, or // nil if there is no current tiny block. //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 10K bytes - Viewed (0) -
pkg/registry/core/service/portallocator/allocator_test.go
clearMetrics() // create NodePort allocator portRange := "30000-32766" pr, err := net.ParsePortRange(portRange) if err != nil { t.Fatal(err) } a, err := NewInMemory(*pr) if err != nil { t.Fatalf("unexpected error creating nodeport allocator: %v", err) } a.EnableMetrics() // create metrics disabled allocator with same port range // this metrics should be ignored
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed May 08 07:15:02 UTC 2024 - 14K bytes - Viewed (0) -
staging/src/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
if memAlloc == nil { klog.Error("a mandatory memory allocator wasn't provided, this might have a negative impact on performance, check invocations of EncodeWithAllocator method, falling back on runtime.SimpleAllocator") memAlloc = &runtime.SimpleAllocator{} } switch t := obj.(type) { case bufferedReverseMarshaller: // this path performs a single allocation during write only when the Allocator wasn't provided
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Feb 23 13:38:23 UTC 2022 - 17.8K bytes - Viewed (0)