- Sort Score
- Results per page: 10
- Languages All
Results 51 - 58 of 58 for mib (0.5 sec)
-
src/runtime/malloc.go
// order of a few MiB in size. // // The kind of metadata this applies to has a very low overhead when compared // to address space used, but their constant overheads for small heaps would // be very high if they were to be backed by huge pages (e.g. a few MiB makes // a huge difference for an 8 MiB heap, but barely any difference for a 1 GiB
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0) -
cmd/sts-handlers.go
clientCertificate = "AssumeRoleWithCertificate" customTokenIdentity = "AssumeRoleWithCustomToken" assumeRole = "AssumeRole" stsRequestBodyLimit = 10 * (1 << 20) // 10 MiB // JWT claim keys expClaim = "exp" subClaim = "sub" audClaim = "aud" issClaim = "iss" // JWT claim to check the parent user parentClaim = "parent" // LDAP claim keys
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Tue Jun 11 03:13:30 UTC 2024 - 33.9K bytes - Viewed (0) -
src/runtime/mgcscavenge.go
// mapped, since min is likely rounded down to include the system page containing minHeapIdx. // // For a chunk size of 4 MiB this structure will only use 2 MiB for a 1 TiB contiguous heap. chunks []atomicScavChunkData min, max atomic.Uintptr minHeapIdx atomic.Uintptr
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes - Viewed (0) -
src/runtime/export_test.go
s := new(ScavengeIndex) // This is a bit lazy but we easily guarantee we'll be able // to reference all the relevant chunks. The worst-case // memory usage here is 512 MiB, but tests generally use // small offsets from BaseChunkIdx, which results in ~100s // of KiB in memory use. // // This may still be worth making better, at least by sharing
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:50:53 UTC 2024 - 46.1K bytes - Viewed (0) -
cmd/object-api-multipart_test.go
if err != nil { // Failed to create NewMultipartUpload, abort. t.Fatalf("%s : %s", instanceType, err) } uploadIDs = append(uploadIDs, res.UploadID) // Parts with size greater than 5 MiB. // Generating a 6MiB byte array. validPart := bytes.Repeat([]byte("abcdef"), 1*humanize.MiByte) validPartMD5 := getMD5Hash(validPart) // Create multipart parts.
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Thu Feb 22 06:26:06 UTC 2024 - 77.1K bytes - Viewed (0) -
cluster/gce/windows/k8s-node-setup.psm1
# chunk size of 5MB per write request. buffer_chunk_limit 512k # Cap the combined memory usage of this buffer and the one below to # 512KiB/chunk * (6 + 2) chunks = 4 MiB buffer_queue_limit 6 # Never wait more than 5 seconds before flushing logs in the non-error case. flush_interval 5s # Never wait longer than 30 seconds between retries. max_retry_wait 30
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Fri Jun 07 21:13:22 UTC 2024 - 88.3K bytes - Viewed (0) -
src/runtime/mheap.go
assertLockHeld(&h.lock) // We must grow the heap in whole palloc chunks. // We call sysMap below but note that because we // round up to pallocChunkPages which is on the order // of MiB (generally >= to the huge page size) we // won't be calling it too much. ask := alignUp(npage, pallocChunkPages) * pageSize totalGrowth := uintptr(0) // This may overflow because ask could be very large
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0) -
okhttp/src/test/java/okhttp3/URLConnectionTest.kt
override fun contentType(): MediaType? { return null } override fun writeTo(sink: BufferedSink) { val data = ByteArray(2 * 1024 * 1024) // 2 MiB. sink.write(data) } }, ) assertFailsWith<SocketTimeoutException> { getResponse(request) } } @Test
Registered: Sun Jun 16 04:42:17 UTC 2024 - Last Modified: Sat Jan 20 10:30:28 UTC 2024 - 131.7K bytes - Viewed (0)