- Sort Score
- Results per page: 10 results
- Languages All
Results 1 - 7 of 7 for uncommitted (0.17 sec)
-
okhttp/src/test/java/okhttp3/internal/cache/DiskLruCacheTest.kt
@ParameterizedTest @ArgumentsSource(FileSystemParamProvider::class) fun recoverFromInitializationFailure(parameters: Pair<FileSystem, Boolean>) { setUp(parameters.first, parameters.second) // Add an uncommitted entry. This will get detected on initialization, and the cache will // attempt to delete the file. Do not explicitly close the cache here so the entry is left as // incomplete. val creator = cache.edit("k1")!!
Registered: Sun Jun 16 04:42:17 UTC 2024 - Last Modified: Mon Apr 15 14:55:09 UTC 2024 - 75.8K bytes - Viewed (0) -
src/runtime/mgcscavenge.go
// out of scavenged memory incurs a potentially expensive page fault. // // If a memory limit is set, then we wish to pick a scavenge goal that maintains // that memory limit. For that, we look at total memory that has been committed // (memstats.mappedReady) and try to bring that down below the limit. In this case, // we want to give buffer space in the *opposite* direction. When the application
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes - Viewed (0) -
src/main/java/jcifs/smb/SmbSessionImpl.java
* sent. Unfortunately calling disconnect() doesn't always * actually shutdown the connection before other threads * have committed themselves (e.g. InterruptTest example). */ try { trans.disconnect(true); }
Registered: Wed Jun 12 15:45:55 UTC 2024 - Last Modified: Sun Nov 14 17:41:04 UTC 2021 - 49K bytes - Viewed (0) -
src/runtime/malloc.go
// 32-bit and on Windows. We use smaller arenas on Windows // because all committed memory is charged to the process, // even if it's not touched. Hence, for processes with small // heaps, the mapped arena space needs to be commensurate. // This is particularly important with the race detector, // since it significantly amplifies the cost of committed // memory. heapArenaBytes = 1 << logHeapArenaBytes
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0) -
src/regexp/syntax/parse.go
func (p *parser) parseUnicodeClass(s string, r []rune) (out []rune, rest string, err error) { if p.flags&UnicodeGroups == 0 || len(s) < 2 || s[0] != '\\' || s[1] != 'p' && s[1] != 'P' { return } // Committed to parse or return error. sign := +1 if s[1] == 'P' { sign = -1 } t := s[2:] c, t, err := nextRune(t) if err != nil { return } var seq, name string if c != '{' {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 02 13:59:01 UTC 2024 - 52.1K bytes - Viewed (0) -
src/cmd/go/internal/test/test.go
if j < 0 { if cache.DebugTest { fmt.Fprintf(os.Stderr, "testcache: %s: test output malformed\n", a.Package.ImportPath) } return false } j += i + len("ok \t") + 1 // Committed to printing. c.buf = new(bytes.Buffer) c.buf.Write(data[:j]) c.buf.WriteString("(cached)") for j < len(data) && ('0' <= data[j] && data[j] <= '9' || data[j] == '.' || data[j] == 's') { j++ }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 16 14:34:32 UTC 2024 - 71.9K bytes - Viewed (0) -
src/runtime/mheap.go
if typ == spanAllocHeap { gcController.heapInUse.add(int64(nbytes)) } // Update consistent stats. stats := memstats.heapStats.acquire() atomic.Xaddint64(&stats.committed, int64(scav)) atomic.Xaddint64(&stats.released, -int64(scav)) switch typ { case spanAllocHeap: atomic.Xaddint64(&stats.inHeap, int64(nbytes)) case spanAllocStack:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0)