Results 91 - 100 of 867 for heal (0.1 sec)

  1. src/runtime/proc.go

    	if !q.empty() {
    		q.tail.ptr().schedlink = l.head
    		l.head = q.head
    	}
    }
    
    // pop removes and returns the head of l. If l is empty, it returns nil.
    func (l *gList) pop() *g {
    	gp := l.head.ptr()
    	if gp != nil {
    		l.head = gp.schedlink
    	}
    	return gp
    }
    
    //go:linkname setMaxThreads runtime/debug.setMaxThreads
    func setMaxThreads(in int) (out int) {
    	lock(&sched.lock)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 207.5K bytes
    - Viewed (0)
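
The gList shown above is a runtime-internal intrusive list of goroutines and cannot be used outside the runtime. As a rough illustration only, here is a minimal sketch of the same pop pattern on an ordinary struct; the node and list types below are hypothetical stand-ins, not runtime code.

    package main

    import "fmt"

    // node is a hypothetical element that, like a g, carries its own link field.
    type node struct {
    	val  int
    	next *node
    }

    // list mirrors the shape of gList: it tracks only the head, and pop follows
    // the element's own link, returning nil when the list is empty.
    type list struct {
    	head *node
    }

    func (l *list) push(n *node) {
    	n.next = l.head
    	l.head = n
    }

    func (l *list) pop() *node {
    	n := l.head
    	if n != nil {
    		l.head = n.next
    	}
    	return n
    }

    func main() {
    	var l list
    	l.push(&node{val: 1})
    	l.push(&node{val: 2})
    	for n := l.pop(); n != nil; n = l.pop() {
    		fmt.Println(n.val)
    	}
    }
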
  2. src/cmd/vendor/golang.org/x/mod/sumdb/client.go

    }
    
    // mergeLatest merges the tree head in msg
    // with the Client's current latest tree head,
    // ensuring the result is a consistent timeline.
    // If the result is inconsistent, mergeLatest calls c.ops.SecurityError
    // with a detailed security error message and then
    // (only if c.ops.SecurityError does not exit the program) returns ErrSecurity.
    // If the Client's current latest tree head moves forward,
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Feb 08 17:50:49 UTC 2024
    - 19.1K bytes
    - Viewed (0)
  3. src/runtime/mgc.go

    // one object. When a goroutine needs to allocate large-object span from heap,
    // it sweeps spans until it frees at least that many pages into heap. There is
    // one case where this may not suffice: if a goroutine sweeps and frees two
    // nonadjacent one-page spans to the heap, it will allocate a new two-page
    // span, but there can still be other one-page unswept spans which could be
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 16:25:21 UTC 2024
    - 62K bytes
    - Viewed (0)
  4. src/runtime/traceallocfree.go

    	traceAllocFreeInfoBatch         // Contains info for interpreting events. [min heap addr, page size, min heap align, min stack align]
    )
    
    // traceSnapshotMemory takes a snapshot of all runtime memory that there are events for
    // (heap spans, heap objects, goroutine stacks, etc.) and writes out events for them.
    //
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 20:32:51 UTC 2024
    - 5.9K bytes
    - Viewed (0)
  5. hack/verify-govulncheck.sh

    govulncheck -scan module ./... > "${KUBE_TEMP}/head.txt"
    pushd "${WORKTREE}" >/dev/null
      govulncheck -scan module ./... > "${KUBE_TEMP}/pr-base.txt"
    popd >/dev/null
    
    echo -e "\n HEAD: $(cat "${KUBE_TEMP}"/head.txt)" 
    echo -e "\n PR_BASE: $(cat "${KUBE_TEMP}/pr-base.txt")" 
    
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Fri Mar 01 06:06:44 UTC 2024
    - 1.7K bytes
    - Viewed (0)
  6. src/runtime/metrics/doc.go

    		Cumulative count of heap allocations whose storage was freed
    		by the garbage collector. Note that this does not include tiny
    		objects as defined by /gc/heap/tiny/allocs:objects, only tiny
    		blocks.
    
    	/gc/heap/goal:bytes
    		Heap size target for the end of the GC cycle.
    
    	/gc/heap/live:bytes
    		Heap memory occupied by live objects that were marked by the
    		previous GC.
    
    	/gc/heap/objects:objects
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 22:58:43 UTC 2024
    - 20K bytes
    - Viewed (0)
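
The metrics documented in doc.go above can be sampled from a running program through the public runtime/metrics package. A minimal sketch (the metric names are taken from the excerpt; everything else is an assumption for illustration):

    package main

    import (
    	"fmt"
    	"runtime/metrics"
    )

    func main() {
    	// Sample the GC heap goal and live-heap metrics described in doc.go.
    	samples := []metrics.Sample{
    		{Name: "/gc/heap/goal:bytes"},
    		{Name: "/gc/heap/live:bytes"},
    	}
    	metrics.Read(samples)
    	for _, s := range samples {
    		// Both metrics are uint64 byte counts; any other kind means the
    		// name is not known to this Go version.
    		if s.Value.Kind() == metrics.KindUint64 {
    			fmt.Printf("%s = %d\n", s.Name, s.Value.Uint64())
    		}
    	}
    }
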
  7. src/cmd/vendor/golang.org/x/telemetry/internal/counter/file.go

    	// We know name does not appear in the chain starting at head.
    	for {
    		next.Store(head)
    		if m.cas32(headOff, head, start) {
    			return v, nil, nil
    		}
    
    		// Check new elements in chain for duplicates.
    		old := head
    		head = m.load32(headOff)
    		for off := head; off != old; {
    			ename, enext, v, ok := m.entryAt(off)
    			if !ok {
    				return nil, nil, errCorrupt
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Jun 04 16:19:04 UTC 2024
    - 18.2K bytes
    - Viewed (0)
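
The snippet above prepends a new counter entry to a lock-free chain stored in a memory-mapped file: it stores the current head into the entry's next field, attempts to CAS the head offset to the new entry, and on failure re-reads the head and rescans the newly added entries for duplicates. Below is a rough in-memory sketch of the same prepend-with-retry pattern, using pointers instead of file offsets and omitting the duplicate scan; the entry and chain types are hypothetical, not the telemetry package's API.

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // entry plays the role of a counter record; next links to the previous head.
    type entry struct {
    	name string
    	next atomic.Pointer[entry]
    }

    type chain struct {
    	head atomic.Pointer[entry]
    }

    // prepend publishes e at the head of the chain, retrying if another
    // goroutine wins the race, analogous to the cas32 loop on headOff above.
    func (c *chain) prepend(e *entry) {
    	for {
    		head := c.head.Load()
    		e.next.Store(head)
    		if c.head.CompareAndSwap(head, e) {
    			return
    		}
    		// CAS failed: another writer prepended first; loop and link
    		// against the new head. (file.go additionally rescans the newly
    		// added entries here to check for a duplicate name.)
    	}
    }

    func main() {
    	var c chain
    	c.prepend(&entry{name: "golang.org/x/mod"})
    	c.prepend(&entry{name: "golang.org/x/telemetry"})
    	for e := c.head.Load(); e != nil; e = e.next.Load() {
    		fmt.Println(e.name)
    	}
    }
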
  8. src/runtime/chan.go

    // sg must already be dequeued from c.
    // ep must be non-nil and point to the heap or the caller's stack.
    func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
    	if raceenabled {
    		if c.dataqsiz == 0 {
    			racesync(c, sg)
    		} else {
    			// Pretend we go through the buffer, even though
    			// we copy directly. Note that we need to increment
    			// the head/tail locations only when raceenabled.
    			racenotify(c, c.recvx, nil)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 01:16:50 UTC 2024
    - 25.9K bytes
    - Viewed (0)
  9. testing/smoke-test/src/smokeTest/resources/org/gradle/play/integtest/fixtures/external/basicplayapp/app/views/main.scala.html

    @(title: String)(content: Html)
    <!DOCTYPE html>
    <html>
        <head>
            <title>@title</title>
            <link rel="stylesheet" media="screen" href="@routes.Assets.at("stylesheets/main.css")">
            <link rel="shortcut icon" type="image/png" href="@routes.Assets.at("images/favicon.png")">
            <script src="@routes.Assets.at("javascripts/hello.js")" type="text/javascript"></script>
        </head>
        <body>
    @content
        </body>
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Thu Apr 04 07:21:38 UTC 2024
    - 440 bytes
    - Viewed (0)
  10. platforms/core-runtime/launcher/src/integTest/groovy/org/gradle/launcher/daemon/DaemonJvmSettingsIntegrationTest.groovy

                        println "Initial Heap: " + memBean.heapMemoryUsage.init
                        assert memBean.heapMemoryUsage.init == 256 * 1024 * 1024
                        println "    Max Heap: " + memBean.heapMemoryUsage.max
    
                        // Java 8 does not report max heap size exactly matching the command line setting
                        if ($java9orAbove) {
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Mon May 06 13:00:39 UTC 2024
    - 4.3K bytes
    - Viewed (0)