Results 41 - 50 of 179 for allocators_ (0.54 sec)

  1. tensorflow/compiler/jit/device_context_test.cc

        TF_EXPECT_OK(status);
        device_context_.reset(device_context);
      }
    
      std::unique_ptr<Device> device_;
      tensorflow::core::RefCountPtr<DeviceContext> device_context_;
      tensorflow::Allocator* host_allocator_;
      tensorflow::Allocator* device_allocator_;
    };
    
    #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
    TEST_F(DeviceContextTest, TestXlaGpuRoundTripTransferWithDeviceApi) {
      SetDevice(DEVICE_XLA_GPU);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 3.7K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/xla_launch_util.cc

    static Tensor MakeTensor(DataType dtype, const TensorShape& shape,
                             se::DeviceMemoryBase buffer, Allocator* allocator) {
      size_t expected_size = shape.num_elements() * DataTypeSize(dtype);
      auto* tensor_buffer = new XlaTensorBuffer(buffer.opaque(), expected_size,
                                                buffer.size(), allocator);
      Tensor t(dtype, shape, tensor_buffer);
      tensor_buffer->Unref();
      return t;
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 40.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/jit/kernels/xla_ops.cc

        xla::LocalClient* client, se::DeviceMemoryAllocator* allocator) {
      se::Stream* stream = GetStream(ctx);
      int device_ordinal = stream ? stream->parent()->device_ordinal()
                                  : client->default_device_ordinal();
      XlaComputationLaunchContext launch_context(
          client, allocator, device_ordinal,
          /*allocate_xla_tensors=*/platform_info.is_on_xla_device(),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 41.4K bytes
    - Viewed (0)
  4. pkg/registry/core/service/ipallocator/bitmap_test.go

    }
    
    func TestClusterIPMetrics(t *testing.T) {
    	clearMetrics()
    	// create IPv4 allocator
    	cidrIPv4 := "10.0.0.0/24"
    	_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy(cidrIPv4)
    	a, err := NewInMemory(clusterCIDRv4)
    	if err != nil {
    		t.Fatalf("unexpected error creating CidrSet: %v", err)
    	}
    	a.EnableMetrics()
    	// create IPv6 allocator
    	cidrIPv6 := "2001:db8::/112"
    	_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy(cidrIPv6)
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed Jan 25 20:32:40 UTC 2023
    - 21.1K bytes
    - Viewed (0)
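
Note: the bitmap_test.go excerpt above builds allocators over parsed CIDRs. The following is a minimal standalone sketch of that bitmap-over-a-CIDR idea, assuming only the standard library; the type and function names (bitmapAllocator, newBitmapAllocator, AllocateNext) are illustrative and are not the Kubernetes ipallocator API.

    package main

    import (
    	"errors"
    	"fmt"
    	"net"
    )

    // bitmapAllocator tracks free/used host addresses of a CIDR in a bitmap.
    // Illustrative only; not the Kubernetes implementation.
    type bitmapAllocator struct {
    	base net.IP // network address of the range
    	used []bool // one flag per host offset
    }

    func newBitmapAllocator(cidr string) (*bitmapAllocator, error) {
    	_, ipnet, err := net.ParseCIDR(cidr)
    	if err != nil {
    		return nil, err
    	}
    	ones, bits := ipnet.Mask.Size()
    	size := 1 << (bits - ones)
    	return &bitmapAllocator{base: ipnet.IP, used: make([]bool, size)}, nil
    }

    // AllocateNext returns the first free address, skipping the network and
    // broadcast offsets.
    func (a *bitmapAllocator) AllocateNext() (net.IP, error) {
    	for i := 1; i < len(a.used)-1; i++ {
    		if !a.used[i] {
    			a.used[i] = true
    			ip := make(net.IP, len(a.base))
    			copy(ip, a.base)
    			ip[len(ip)-1] += byte(i) // adequate for small /24-style ranges
    			return ip, nil
    		}
    	}
    	return nil, errors.New("range is full")
    }

    func main() {
    	a, err := newBitmapAllocator("10.0.0.0/24")
    	if err != nil {
    		panic(err)
    	}
    	ip, _ := a.AllocateNext()
    	fmt.Println("allocated:", ip) // allocated: 10.0.0.1
    }
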
  5. src/runtime/mheap.go

    	}
    
    	spanalloc              fixalloc // allocator for span*
    	cachealloc             fixalloc // allocator for mcache*
    	specialfinalizeralloc  fixalloc // allocator for specialfinalizer*
    	specialprofilealloc    fixalloc // allocator for specialprofile*
    	specialReachableAlloc  fixalloc // allocator for specialReachable
    	specialPinCounterAlloc fixalloc // allocator for specialPinCounter
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
    - Viewed (0)
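
Note: the mheap.go excerpt above declares one fixalloc per object type. The sketch below illustrates the underlying fixed-size free-list idea under simplifying assumptions; it uses Go generics for brevity and is not how the runtime's fixalloc is actually written (the real one works on raw, untyped memory).

    package main

    import "fmt"

    // freeList is a toy fixed-size allocator: one allocator per object type,
    // recycling freed objects through a free list instead of returning them
    // to the general heap.
    type freeList[T any] struct {
    	free []*T // recycled objects ready for reuse
    }

    func (f *freeList[T]) alloc() *T {
    	if n := len(f.free); n > 0 {
    		p := f.free[n-1]
    		f.free = f.free[:n-1]
    		return p
    	}
    	return new(T)
    }

    func (f *freeList[T]) release(p *T) {
    	var zero T
    	*p = zero // scrub before recycling
    	f.free = append(f.free, p)
    }

    type span struct{ start, npages uintptr }

    func main() {
    	var spanalloc freeList[span] // dedicated allocator for span objects
    	s := spanalloc.alloc()
    	s.start, s.npages = 0x1000, 4
    	spanalloc.release(s)
    	fmt.Println(spanalloc.alloc() == s) // true: the object was recycled
    }
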
  6. tensorflow/compiler/mlir/tfr/ir/tfr_types.h

      explicit TFRTypeStorage(unsigned num_attrs) : num_attrs(num_attrs) {}
    
      static TFRTypeStorage* construct(TypeStorageAllocator& allocator, KeyTy key) {
        // Allocate a new storage instance.
        auto byteSize = TFRTypeStorage::totalSizeToAlloc<StringAttr>(key.size());
        auto rawMem = allocator.allocate(byteSize, alignof(TFRTypeStorage));
        auto result = ::new (rawMem) TFRTypeStorage(key.size());
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Dec 05 07:17:01 UTC 2023
    - 4.1K bytes
    - Viewed (0)
  7. src/runtime/mklockrank.go

    # Above WB, we can have write barriers.
    < WB
    # Below WB is the write barrier implementation.
    < wbufSpans;
    
    # Span allocator
    stackLarge,
      stackpool,
      wbufSpans
    # Above mheap is anything that can call the span allocator.
    < mheap;
    # Below mheap is the span allocator implementation.
    #
    # Specials: we're allowed to allocate a special while holding
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:47:01 UTC 2024
    - 9.1K bytes
    - Viewed (0)
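
Note: the mklockrank.go excerpt above encodes a partial order over runtime locks, so a lock is only acquired while holding locks that rank above it. The sketch below shows the same discipline at application level under illustrative names: when two locks may be held together, always acquire them in a fixed order so no cycle, and hence no deadlock, can form.

    package main

    import (
    	"fmt"
    	"sync"
    )

    type account struct {
    	id      int
    	mu      sync.Mutex
    	balance int
    }

    // transfer locks both accounts in rank (ID) order, regardless of which
    // one is the source and which is the destination.
    func transfer(from, to *account, amount int) {
    	first, second := from, to
    	if second.id < first.id {
    		first, second = second, first
    	}
    	first.mu.Lock()
    	defer first.mu.Unlock()
    	second.mu.Lock()
    	defer second.mu.Unlock()

    	from.balance -= amount
    	to.balance += amount
    }

    func main() {
    	a := &account{id: 1, balance: 100}
    	b := &account{id: 2, balance: 100}

    	var wg sync.WaitGroup
    	for i := 0; i < 100; i++ {
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			transfer(a, b, 1) // concurrent transfers in both directions
    			transfer(b, a, 1)
    		}()
    	}
    	wg.Wait()
    	fmt.Println(a.balance, b.balance) // 100 100
    }
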
  8. tensorflow/compiler/jit/xla_platform_info.cc

        // If we are on an XlaDevice, use the underlying XLA platform's allocator
        // directly. We could use the StreamExecutor's allocator which may
        // theoretically be more correct, but XLA returns a nice OOM message in a
        // Status and StreamExecutor does not.
        //
        // Importantly we can't use ctx->device()->GetAllocator() as the allocator
        // (which xla_allocator above uses) as on an XlaDevice, this is a dummy
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 17:23:27 UTC 2024
    - 17.4K bytes
    - Viewed (0)
  9. test/fixedbugs/issue46725.go

    // Use of this source code is governed by a BSD-style
    // license that can be found in the LICENSE file.
    
    package main
    
    import "runtime"
    
    type T [4]int // N.B., [4]int avoids runtime's tiny object allocator
    
    //go:noinline
    func g(x []*T) ([]*T, []*T) { return x, x }
    
    func main() {
    	const Jenny = 8675309
    	s := [10]*T{{Jenny}}
    
    	done := make(chan struct{})
    	runtime.SetFinalizer(s[0], func(p *T) { close(done) })
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Aug 11 20:13:07 UTC 2022
    - 818 bytes
    - Viewed (0)
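
Note: the issue46725.go excerpt above registers a finalizer and uses a channel to observe it. Below is a minimal sketch of that pattern, assuming nothing beyond the standard library; it is not the original regression test. The type is deliberately larger than a couple of words, mirroring the test's comment about steering clear of the tiny-object allocator (tiny allocations can share a block and keep each other alive).

    package main

    import (
    	"fmt"
    	"runtime"
    	"time"
    )

    type T [4]int

    func main() {
    	done := make(chan struct{})

    	p := &T{1}
    	runtime.SetFinalizer(p, func(*T) { close(done) })
    	p = nil // drop the only reference

    	// Finalizers run after the object is collected; prompt a collection.
    	for i := 0; i < 5; i++ {
    		runtime.GC()
    		select {
    		case <-done:
    			fmt.Println("finalizer ran")
    			return
    		case <-time.After(10 * time.Millisecond):
    		}
    	}
    	fmt.Println("finalizer did not run (not guaranteed by the spec)")
    }
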
  10. staging/src/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go

    	if memAlloc == nil {
    		klog.Error("a mandatory memory allocator wasn't provided, this might have a negative impact on performance, check invocations of EncodeWithAllocator method, falling back on runtime.SimpleAllocator")
    		memAlloc = &runtime.SimpleAllocator{}
    	}
    	switch t := obj.(type) {
    	case bufferedReverseMarshaller:
    		// this path performs a single allocation during write only when the Allocator wasn't provided
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed Feb 23 13:38:23 UTC 2022
    - 17.8K bytes
    - Viewed (0)
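
Note: the protobuf.go excerpt above falls back to runtime.SimpleAllocator when the caller does not supply a memory allocator. The sketch below shows the general pattern under illustrative names (memoryAllocator, simpleAllocator, reusingAllocator, encodeWithAllocator); it is not the k8s.io/apimachinery API. Callers that encode in a hot loop supply an allocator whose buffer can be reused across calls, while a trivial fallback keeps the API usable, at the cost of one allocation per encode.

    package main

    import (
    	"encoding/binary"
    	"fmt"
    	"log"
    )

    type memoryAllocator interface {
    	Allocate(n uint64) []byte
    }

    // simpleAllocator allocates a fresh slice on every call (the fallback).
    type simpleAllocator struct{}

    func (simpleAllocator) Allocate(n uint64) []byte { return make([]byte, n) }

    // reusingAllocator hands back the same backing array when it is big enough.
    type reusingAllocator struct{ buf []byte }

    func (a *reusingAllocator) Allocate(n uint64) []byte {
    	if uint64(cap(a.buf)) < n {
    		a.buf = make([]byte, n)
    	}
    	return a.buf[:n]
    }

    // encodeWithAllocator writes a length-prefixed payload using memory obtained
    // from the provided allocator, falling back when none is supplied.
    func encodeWithAllocator(payload []byte, memAlloc memoryAllocator) []byte {
    	if memAlloc == nil {
    		log.Println("no allocator provided, falling back to simpleAllocator")
    		memAlloc = simpleAllocator{}
    	}
    	out := memAlloc.Allocate(uint64(4 + len(payload)))
    	binary.BigEndian.PutUint32(out, uint32(len(payload)))
    	copy(out[4:], payload)
    	return out
    }

    func main() {
    	alloc := &reusingAllocator{}
    	a := encodeWithAllocator([]byte("hello"), alloc)
    	fmt.Printf("%x\n", a)
    	b := encodeWithAllocator([]byte("world"), alloc) // reuses the same buffer
    	fmt.Printf("%x\n", b)
    }
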