Results 1 - 10 of 27 for Deallocator (0.18 sec)

  1. tensorflow/c/tf_tensor.cc

      Allocator* allocator = nullptr;
      if (arg == nullptr) {
        allocator = cpu_allocator();
      } else {
        allocator = reinterpret_cast<Allocator*>(arg);
      }
      if (LogMemory::IsEnabled() && data != nullptr) {
        LogMemory::RecordRawDeallocation(
            "TensorFlow C Api", LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, data,
            allocator, false);
      }
      allocator->DeallocateRaw(data);
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Apr 14 21:57:32 UTC 2024 - 11.5K bytes - Viewed (0)
  2. tensorflow/c/eager/c_api.cc

        void (*deallocator)(void* data, size_t len, void* arg),
        void* deallocator_arg, TF_Status* status) {
      tensorflow::Device* device = nullptr;
      tensorflow::EagerContext* context =
          tensorflow::ContextFromInterface(tensorflow::unwrap(ctx));
      status->status = context->FindDeviceFromName(device_name, &device);
      if (!status->status.ok()) {
        deallocator(data, len, deallocator_arg);
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 08:11:23 UTC 2024 - 44K bytes - Viewed (0)
  3. pkg/registry/core/service/ipallocator/cidrallocator.go

    func (c *MetaAllocator) AllocateService(service *api.Service, ip net.IP) error {
    	allocator, err := c.getAllocator(ip)
    	if err != nil {
    		return err
    	}
    	return allocator.AllocateService(service, ip)
    }
    
    func (c *MetaAllocator) Allocate(ip net.IP) error {
    	allocator, err := c.getAllocator(ip)
    	if err != nil {
    		return err
    	}
    	return allocator.Allocate(ip)
    }
    
    Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 13.2K bytes - Viewed (0)
  4. tensorflow/compiler/jit/xla_platform_info.cc

        // If we are on an XlaDevice, use the underlying XLA platform's allocator
        // directly. We could use the StreamExecutor's allocator which may
        // theoretically be more correct, but XLA returns a nice OOM message in a
        // Status and StreamExecutor does not.
        //
        // Importantly we can't use ctx->device()->GetAllocator() as the allocator
        // (which xla_allocator above uses) as on an XlaDevice, this is a dummy
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 17:23:27 UTC 2024 - 17.4K bytes - Viewed (0)
  5. tensorflow/compiler/jit/xla_device.cc

      }
    
      Status status;
      if (alloc_attrs.on_host()) {
        *tensor = parsed;
      } else {
        Allocator* allocator;
        {
          mutex_lock lock(mu_);
          allocator = GetAllocatorLocked(alloc_attrs);
        }
        Tensor copy(allocator, parsed.dtype(), parsed.shape());
        TF_RETURN_IF_ERROR(
            device_context->CopyCPUTensorToDeviceSync(&parsed, this, &copy));
        *tensor = copy;
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0)
  6. pkg/registry/core/service/ipallocator/controller/repairip.go

    	defer r.svcQueue.ShutDown()
    	r.broadcaster.StartRecordingToSink(stopCh)
    	defer r.broadcaster.Shutdown()
    
    	klog.Info("Starting ipallocator-repair-controller")
    	defer klog.Info("Shutting down ipallocator-repair-controller")
    
    	if !cache.WaitForNamedCacheSync("ipallocator-repair-controller", stopCh, r.ipAddressSynced, r.servicesSynced, r.serviceCIDRSynced) {
    		return
    	}
    
    Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 24.7K bytes - Viewed (0)
  7. tensorflow/compiler/jit/kernels/xla_ops.cc

          }
    
          std::shared_ptr<se::DeviceMemoryAllocator> allocator =
              GetAllocator(ctx->device(), GetStream(ctx), platform_info);
          XlaComputationLaunchContext launch_context =
              GetLaunchContext(platform_info, ctx, client, allocator.get());
    
          const xla::HloInputOutputAliasConfig& input_output_alias =
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0)
  8. tensorflow/compiler/jit/xla_launch_util.cc

        const std::map<int, const Tensor*>& resource_vars) {
      se::Stream* stream =
          ctx->op_device_context() ? ctx->op_device_context()->stream() : nullptr;
      Allocator* allocator = ctx->device()->GetAllocator({});
    
      // Computation output should always be a tuple.
      VLOG(2) << "Result tuple shape: " << output.on_host_shape().DebugString();
      VLOG(2) << "Result tuple shape (on device): "
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes - Viewed (0)
  9. pkg/controller/nodeipam/ipam/range_allocator_test.go

    		// Initialize the range allocator.
    		allocator, err := NewCIDRRangeAllocator(tCtx, tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList)
    		if err != nil {
    			t.Errorf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err)
    			return
    		}
    		rangeAllocator, ok := allocator.(*rangeAllocator)
    		if !ok {
    Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Apr 24 10:06:15 UTC 2024 - 25.2K bytes - Viewed (0)
  10. tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.td

    def ExecuteOpWithAllocator : FallbackAsync_Op<"executeop.allocator",
        [Pure, CoreRT_TypedAttributeTrait, TFRT_CostFunctionInterface, TFRT_AttrCostTrait]> {
      let summary = "The Fallback ExecuteOp with custom allocator";
      let description = [{
        Similar to ExecuteOp but takes a custom allocator for allocating output tensors.
      }];
    
      let arguments = (ins
        TFAllocatorType:$allocator,
        Variadic<TFTensorType>:$args,
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 15:01:21 UTC 2024 - 15.8K bytes - Viewed (0)
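
For context on the deallocator signature that results 1 and 2 revolve around, here is a minimal sketch of handing TensorFlow's C API a caller-owned buffer with a custom deallocator. It assumes only the public tensorflow/c/c_api.h header from the same tree; free_buffer is an illustrative name for this sketch, not part of the API. TF_DeleteTensor later triggers the callback with the original data pointer, length, and arg.

    #include <stdio.h>
    #include <stdlib.h>
    #include "tensorflow/c/c_api.h"

    /* Matches the deallocator signature seen in results 1 and 2:
       void (*deallocator)(void* data, size_t len, void* arg).
       free_buffer is a hypothetical name used only in this sketch. */
    static void free_buffer(void* data, size_t len, void* arg) {
      (void)len;
      (void)arg;
      free(data);
    }

    int main(void) {
      const int64_t dims[1] = {4};
      const size_t len = 4 * sizeof(float);
      float* data = malloc(len);
      for (int i = 0; i < 4; ++i) data[i] = (float)i;

      /* TF_NewTensor takes ownership of `data`; free_buffer runs once the
         last reference to the underlying buffer is dropped. */
      TF_Tensor* t = TF_NewTensor(TF_FLOAT, dims, 1, data, len,
                                  free_buffer, /*deallocator_arg=*/NULL);
      printf("tensor bytes: %zu\n", TF_TensorByteSize(t));
      TF_DeleteTensor(t); /* invokes free_buffer(data, len, NULL) */
      return 0;
    }

Building this requires linking against the prebuilt libtensorflow C library.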