Results 1 - 10 of 14 for genAllocator (0.24 sec)
- pkg/registry/core/service/ipallocator/cidrallocator_test.go
    // wait for the cidr to be processed and set the informer synced
    err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
        allocator, err := r.getAllocator(netutils.ParseIPSloppy("192.168.0.1"))
        if err != nil {
            t.Logf("unexpected error %v", err)
            return false, nil
        }
        allocator.ipAddressSynced = func() bool { return true }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Oct 31 21:05:06 UTC 2023 - 13.7K bytes - Viewed (0)
- pkg/registry/core/service/ipallocator/cidrallocator.go
        }
    }

    func (c *MetaAllocator) AllocateService(service *api.Service, ip net.IP) error {
        allocator, err := c.getAllocator(ip)
        if err != nil {
            return err
        }
        return allocator.AllocateService(service, ip)
    }

    func (c *MetaAllocator) Allocate(ip net.IP) error {
        allocator, err := c.getAllocator(ip)
        if err != nil {
            return err
        }
        return allocator.Allocate(ip)
    }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 13.2K bytes - Viewed (0)
- tensorflow/compiler/jit/xla_platform_info.cc
        custom_allocator);
    }

    std::shared_ptr<se::DeviceMemoryAllocator> GetAllocator(
        DeviceBase* device, se::Stream* stream,
        const XlaPlatformInfo& platform_info) {
      if (platform_info.custom_allocator()) {
        return platform_info.custom_allocator();
      }
      auto* alloc = device->GetAllocator({});
      if (!stream) {
        // Stream is not set for the host platform.
        se::Platform* platform =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 17:23:27 UTC 2024 - 17.4K bytes - Viewed (0)
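
The xla_platform_info.cc hit above shows a simple fallback chain: prefer the custom allocator carried by the platform info, otherwise fall back to the allocator the device itself hands out. A minimal, self-contained C++ sketch of that selection pattern, using toy stand-in types rather than the real se::DeviceMemoryAllocator and XlaPlatformInfo classes:

    #include <iostream>
    #include <memory>

    // Toy stand-ins for the real allocator / platform-info types.
    struct Allocator {
      virtual ~Allocator() = default;
      virtual const char* name() const = 0;
    };
    struct CustomAllocator : Allocator {
      const char* name() const override { return "custom"; }
    };
    struct DeviceAllocator : Allocator {
      const char* name() const override { return "device-default"; }
    };

    struct PlatformInfo {
      std::shared_ptr<Allocator> custom_allocator;  // may be null
    };
    struct Device {
      std::shared_ptr<Allocator> default_allocator = std::make_shared<DeviceAllocator>();
    };

    // Same shape as the snippet: prefer the custom allocator, else ask the device.
    std::shared_ptr<Allocator> GetAllocator(const Device& device,
                                            const PlatformInfo& info) {
      if (info.custom_allocator) return info.custom_allocator;
      return device.default_allocator;
    }

    int main() {
      Device device;
      PlatformInfo with_custom{std::make_shared<CustomAllocator>()};
      PlatformInfo without_custom{};
      std::cout << GetAllocator(device, with_custom)->name() << "\n";     // custom
      std::cout << GetAllocator(device, without_custom)->name() << "\n";  // device-default
    }
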
- tensorflow/compiler/jit/xla_launch_util_gpu_test.cc
    // Get the host allocator.
    AllocatorAttributes host_alloc_attr;
    host_alloc_attr.set_on_host(true);
    host_allocator_ = device_->GetAllocator(host_alloc_attr);

    AllocatorAttributes device_alloc_attr;
    device_alloc_attr.set_on_host(false);
    device_allocator_ = device_->GetAllocator(device_alloc_attr);

    // Create the DeviceCompiler to help with compiling executables.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 10K bytes - Viewed (0)
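
Both launch-util test hits (this one and xla_launch_util_test.cc further down) follow the same setup: one AllocatorAttributes with on_host(true) for host memory and one with on_host(false) for device memory, each passed to the device's GetAllocator. A rough sketch of that attribute-keyed lookup with hypothetical, simplified types, not the TensorFlow Device/AllocatorAttributes API:

    #include <iostream>
    #include <string>

    // Hypothetical, simplified stand-ins for AllocatorAttributes / Device.
    struct AllocatorAttributes {
      bool on_host = false;
      void set_on_host(bool v) { on_host = v; }
    };

    struct Device {
      // Returns a label instead of a real allocator; the selection logic is the point.
      std::string GetAllocator(const AllocatorAttributes& attr) const {
        return attr.on_host ? "host allocator" : "device allocator";
      }
    };

    int main() {
      Device device;

      AllocatorAttributes host_alloc_attr;
      host_alloc_attr.set_on_host(true);
      AllocatorAttributes device_alloc_attr;
      device_alloc_attr.set_on_host(false);

      std::cout << device.GetAllocator(host_alloc_attr) << "\n";    // host allocator
      std::cout << device.GetAllocator(device_alloc_attr) << "\n";  // device allocator
    }
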
- tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.cc
      if (!op.getResults().empty()) p << " : " << op.getResults().size();
    }

    void ExecuteOpWithAllocator::print(OpAsmPrinter &p) {
      ExecuteOpWithAllocator op = *this;
      p << "(" << op.getAllocator() << ") key("
        << op->getAttrOfType<mlir::IntegerAttr>("op_key").getInt() << ") cost("
        << op->getAttrOfType<mlir::IntegerAttr>("_tfrt_cost").getInt()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 08 01:19:25 UTC 2023 - 15.6K bytes - Viewed (0)
- tensorflow/compiler/jit/kernels/xla_ops.cc
      for (int i = 0; i < resources.size(); i++) {
        resource_var_ptrs[resources[i]] = variable_infos[i].var()->tensor();
      }

      std::shared_ptr<se::DeviceMemoryAllocator> allocator =
          GetAllocator(ctx->device(), GetStream(ctx), platform_info);
      XlaComputationLaunchContext launch_context =
          GetLaunchContext(platform_info, ctx, client, allocator.get());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0)
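
In the xla_ops.cc hit, GetAllocator returns a std::shared_ptr, but only the raw pointer (allocator.get()) is handed to the launch context, so the shared_ptr must stay in scope for as long as the context is used. A tiny illustrative sketch of that ownership arrangement, with made-up Allocator/LaunchContext types:

    #include <cassert>
    #include <memory>

    // Toy allocator and launch-context types; not the TensorFlow/XLA classes.
    struct Allocator { int id = 42; };

    struct LaunchContext {
      // The context stores only a raw pointer, so the caller must keep the
      // owning shared_ptr alive for as long as the context is used.
      explicit LaunchContext(Allocator* alloc) : alloc_(alloc) {}
      Allocator* alloc_;
    };

    int main() {
      std::shared_ptr<Allocator> allocator = std::make_shared<Allocator>();
      LaunchContext launch_context(allocator.get());  // raw pointer handed off
      // ... use the launch context while `allocator` is still in scope ...
      assert(launch_context.alloc_->id == 42);
    }
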
- tensorflow/compiler/jit/xla_device.cc
          .set_allowed_devices(allowed_devices_)
          .set_intra_op_parallelism_threads(intra_op_parallelism_threads_);
      return xla::ClientLibrary::GetOrCreateLocalClient(options);
    }

    Allocator* XlaDevice::GetAllocator(AllocatorAttributes attr) {
      mutex_lock lock(mu_);
      return GetAllocatorLocked(attr);
    }

    Allocator* XlaDevice::GetAllocatorLocked(AllocatorAttributes attr) {
      if (attr.on_host()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0)
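
The xla_device.cc hit uses the common split where the public method takes the lock and the *Locked helper assumes it is already held, which lets other members that already hold mu_ reuse GetAllocatorLocked without self-deadlock. A generic, self-contained sketch of the pattern, using plain std::mutex rather than TensorFlow's mutex types:

    #include <iostream>
    #include <mutex>

    class Device {
     public:
      // Public entry point: takes the lock, then delegates to the locked helper.
      int GetAllocator(bool on_host) {
        std::lock_guard<std::mutex> lock(mu_);
        return GetAllocatorLocked(on_host);
      }

     private:
      // Assumes mu_ is already held by the caller and never locks itself,
      // so methods that already hold mu_ can call it without deadlocking.
      int GetAllocatorLocked(bool on_host) { return on_host ? host_id_ : device_id_; }

      std::mutex mu_;
      int host_id_ = 1;
      int device_id_ = 2;
    };

    int main() {
      Device d;
      std::cout << d.GetAllocator(/*on_host=*/true) << "\n";   // 1
      std::cout << d.GetAllocator(/*on_host=*/false) << "\n";  // 2
    }
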
- tensorflow/compiler/jit/xla_launch_util_test.cc
    host_alloc_attr.set_on_host(true);
    host_allocator_ = device_->GetAllocator(host_alloc_attr);

    // Get the device allocator. This should give us an AsyncValueAllocator.
    AllocatorAttributes device_alloc_attr;
    device_alloc_attr.set_on_host(false);
    device_allocator_ = device_->GetAllocator(device_alloc_attr);

    // Create the DeviceCompiler to help with compiling executables.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 28.8K bytes - Viewed (0)
- tensorflow/compiler/jit/xla_compile_on_demand_op.cc
      se::Stream* stream = ctx->op_device_context()
                               ? ctx->op_device_context()->stream()
                               : nullptr;
      std::shared_ptr<se::DeviceMemoryAllocator> allocator_ptr =
          GetAllocator(ctx->device(), stream, platform_info_);
      se::DeviceMemoryAllocator* allocator = allocator_ptr.get();
      XlaComputationLaunchContext launch_context(
          client, allocator, client->default_device_ordinal(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 29 08:39:39 UTC 2024 - 13.4K bytes - Viewed (0)
- tensorflow/compiler/jit/xla_device.h
        std::optional<std::set<int>> allowed_devices;
      };

      // Creates a new XLA Device.
      XlaDevice(const SessionOptions& session_options, const Options& options);
      ~XlaDevice() override;

      Allocator* GetAllocator(AllocatorAttributes attr) override
          TF_LOCKS_EXCLUDED(mu_);
      void Compute(OpKernel* op_kernel, OpKernelContext* context) override;
      void ComputeAsync(AsyncOpKernel* op_kernel, OpKernelContext* context,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0)