- Sort Score
- Results per page: 10
- Languages All
Results 51 - 60 of 462 for Allocate (0.2 sec)
-
src/index/suffixarray/suffixarray.go
} if int64(int(n64)) != n64 || int(n64) < 0 { return errTooBig } n := int(n64) // allocate space if 2*n < cap(x.data) || cap(x.data) < n || x.sa.int32 != nil && n > maxData32 || x.sa.int64 != nil && n <= maxData32 { // new data is significantly smaller or larger than // existing buffers - allocate new ones x.data = make([]byte, n) x.sa.int32 = nil x.sa.int64 = nil if n <= maxData32 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 9.5K bytes - Viewed (0) -
pkg/controller/nodeipam/ipam/cidr_allocator.go
cidrUpdateRetries = 3 ) // nodePollInterval is used in listing node var nodePollInterval = 10 * time.Second // CIDRAllocator is an interface implemented by things that know how // to allocate/occupy/recycle CIDR for nodes. type CIDRAllocator interface { // AllocateOrOccupyCIDR looks at the given node, assigns it a valid // CIDR if it doesn't currently have one or mark the CIDR as used if
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed May 01 13:03:57 UTC 2024 - 5.3K bytes - Viewed (0) -
staging/src/k8s.io/apimachinery/pkg/util/framer/framer.go
m := json.RawMessage(data[:0]) if err := r.decoder.Decode(&m); err != nil { return 0, err } // If capacity of data is less than length of the message, decoder will allocate a new slice // and set m to it, which means we need to copy the partial result back into data and preserve // the remaining result for subsequent reads. if len(m) > cap(data) { copy(data, m)
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu May 09 13:33:12 UTC 2024 - 4.9K bytes - Viewed (0) -
src/runtime/race_s390x.s
MOVD R15, R7 // Save SP. MOVD g_m(g), R8 // R8 = thread. MOVD m_g0(R8), R8 // R8 = g0. CMPBEQ R8, g, call // Already on g0? MOVD (g_sched+gobuf_sp)(R8), R15 // Switch SP to g0. call: SUB $160, R15 // Allocate C frame. BL R1 // Call C code. MOVD R7, R15 // Restore SP. RET // Return to Go. // C->Go callback thunk that allows to call runtime·racesymbolize from C
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:37:29 UTC 2024 - 13.1K bytes - Viewed (0) -
src/slices/slices_test.go
t.Errorf("Grow should not allocate when given sufficient capacity; allocated %v times", n) } if n := testing.AllocsPerRun(100, func() { _ = Grow(s2, cap(s2)-len(s2)+1) }); n != 1 { errorf := t.Errorf if race.Enabled || testenv.OptimizationOff() { errorf = t.Logf // this allocates multiple times in race detector mode } errorf("Grow should allocate once when given insufficient capacity; allocated %v times", n)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 26 13:32:06 UTC 2024 - 33.2K bytes - Viewed (0) -
src/internal/filepathlite/path.go
"slices" ) var errInvalidPath = errors.New("invalid path") // A lazybuf is a lazily constructed path buffer. // It supports append, reading previously appended bytes, // and retrieving the final string. It does not allocate a buffer // to hold the output until that output diverges from s. type lazybuf struct { path string buf []byte w int volAndPath string volLen int }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 26 23:07:50 UTC 2024 - 5.8K bytes - Viewed (0) -
src/unique/handle.go
// this path on the first use of Make, and it's not on the hot path. setupMake.Do(registerCleanup) ma = addUniqueMap[T](typ) } m := ma.(*uniqueMap[T]) // Keep around any values we allocate for insertion. There // are a few different ways we can race with other threads // and create values that we might discard. By keeping // the first one we make around, we can avoid generating
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 16:01:55 UTC 2024 - 5.3K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/stream_executor_test.cc
SP_PlatformFns platform_fns_; SP_DeviceFns device_fns_; SP_StreamExecutor se_; SP_TimerFns timer_fns_; std::unique_ptr<CPlatform> cplatform_; }; TEST_F(StreamExecutorTest, Allocate) { se_.allocate = [](const SP_Device* const device, uint64_t size, int64_t memory_space, SP_DeviceMemoryBase* const mem) { mem->struct_size = SP_DEVICE_MEMORY_BASE_STRUCT_SIZE; mem->opaque = malloc(size);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 19:54:04 UTC 2024 - 26.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device_context.cc
// pointer. We can return an empty object and ignore num_bytes here because we // have control over all of the uses of this device tensor, and can lazily // allocate memory when used. This allows us to also know the shape of the // allocated Tensor, which is useful if the device's tensor representation // differs from the host. return XlaTensor::ToOpaquePointer(new XlaTensor()); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 12.7K bytes - Viewed (0) -
src/runtime/map_faststr.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:17:26 UTC 2024 - 15.3K bytes - Viewed (0)