- Sort Score
- Result 10 results
- Languages All
Results 11 - 20 of 83 for Deallocator (0.27 sec)
-
tensorflow/compiler/jit/xla_device.cc
} Status status; if (alloc_attrs.on_host()) { *tensor = parsed; } else { Allocator* allocator; { mutex_lock lock(mu_); allocator = GetAllocatorLocked(alloc_attrs); } Tensor copy(allocator, parsed.dtype(), parsed.shape()); TF_RETURN_IF_ERROR( device_context->CopyCPUTensorToDeviceSync(&parsed, this, &copy)); *tensor = copy;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0) -
pkg/registry/core/service/ipallocator/controller/repairip.go
defer r.svcQueue.ShutDown() r.broadcaster.StartRecordingToSink(stopCh) defer r.broadcaster.Shutdown() klog.Info("Starting ipallocator-repair-controller") defer klog.Info("Shutting down ipallocator-repair-controller") if !cache.WaitForNamedCacheSync("ipallocator-repair-controller", stopCh, r.ipAddressSynced, r.servicesSynced, r.serviceCIDRSynced) { return }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 24.7K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.cc
} std::shared_ptr<se::DeviceMemoryAllocator> allocator = GetAllocator(ctx->device(), GetStream(ctx), platform_info); XlaComputationLaunchContext launch_context = GetLaunchContext(platform_info, ctx, client, allocator.get()); const xla::HloInputOutputAliasConfig& input_output_alias =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
std::optional<std::set<int>> allowed_devices; }; // Creates a new XLA Device. XlaDevice(const SessionOptions& session_options, const Options& options); ~XlaDevice() override; Allocator* GetAllocator(AllocatorAttributes attr) override TF_LOCKS_EXCLUDED(mu_); void Compute(OpKernel* op_kernel, OpKernelContext* context) override; void ComputeAsync(AsyncOpKernel* op_kernel, OpKernelContext* context,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util_test.cc
TF_CHECK_OK(device_->TryGetDeviceContext(&device_context_)); // Get the host allocator. AllocatorAttributes host_alloc_attr; host_alloc_attr.set_on_host(true); host_allocator_ = device_->GetAllocator(host_alloc_attr); // Get the device allocator. This should give us an AsyncValueAllocator. AllocatorAttributes device_alloc_attr; device_alloc_attr.set_on_host(false);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 28.8K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compile_on_demand_op.cc
std::shared_ptr<se::DeviceMemoryAllocator> allocator_ptr = GetAllocator(ctx->device(), stream, platform_info_); se::DeviceMemoryAllocator* allocator = allocator_ptr.get(); XlaComputationLaunchContext launch_context( client, allocator, client->default_device_ordinal(), /*allocate_xla_tensors=*/platform_info_.xla_device_metadata() != nullptr,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 29 08:39:39 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.cc
const std::map<int, const Tensor*>& resource_vars) { se::Stream* stream = ctx->op_device_context() ? ctx->op_device_context()->stream() : nullptr; Allocator* allocator = ctx->device()->GetAllocator({}); // Computation output should always be a tuple. VLOG(2) << "Result tuple shape: " << output.on_host_shape().DebugString(); VLOG(2) << "Result tuple shape (on device): "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes - Viewed (0) -
pkg/controller/nodeipam/ipam/cidr_allocator.go
"k8s.io/klog/v2" ) // CIDRAllocatorType is the type of the allocator to use. type CIDRAllocatorType string const ( // RangeAllocatorType is the allocator that uses an internal CIDR // range allocator to do node CIDR range allocations. RangeAllocatorType CIDRAllocatorType = "RangeAllocator" // CloudAllocatorType is the allocator that uses cloud platform // support to do node CIDR range allocations.
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed May 01 13:03:57 UTC 2024 - 5.3K bytes - Viewed (0) -
tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util_test.cc
return new PJRT_Buffer{std::move(*buffer), c_api_client->pjrt_c_client()}; } TEST(TensorPjRtBufferUtilTest, GetPjRtCBufferFromTensorNoBuffer) { auto allocator = std::make_unique<AsyncValueAllocator>(); tensorflow::Tensor tensor(allocator.get(), DT_FLOAT, {1}); EXPECT_THAT( GetPjRtCBufferFromTensor(&tensor), StatusIs(error::INTERNAL, HasSubstr(absl::StrCat(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 01 16:29:40 UTC 2024 - 7.2K bytes - Viewed (0) -
pkg/controller/nodeipam/ipam/range_allocator_test.go
// Initialize the range allocator. allocator, err := NewCIDRRangeAllocator(tCtx, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList) if err != nil { t.Errorf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err) return } rangeAllocator, ok := allocator.(*rangeAllocator) if !ok {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Apr 24 10:06:15 UTC 2024 - 25.2K bytes - Viewed (0)