Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 11 - 20 of 3,518 for releasem (0.17 sec)

  1. src/runtime/arena.go

    		}
    	} else {
    		v, ok := s.userArenaChunkFree.takeFromFront(size, typ.Align_)
    		if ok {
    			ptr = unsafe.Pointer(v)
    		}
    	}
    	if ptr == nil {
    		// Failed to allocate.
    		mp.mallocing = 0
    		releasem(mp)
    		return nil
    	}
    	if s.needzero != 0 {
    		throw("arena chunk needs zeroing, but should already be zeroed")
    	}
    	// Set up heap bitmap and do extra accounting.
    	if typ.Pointers() {
    		if cap >= 0 {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:44:56 UTC 2024
    - 37.9K bytes
    - Viewed (0)
  2. src/cmd/compile/internal/test/inl_test.go

    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Apr 09 04:07:57 UTC 2024
    - 10.7K bytes
    - Viewed (0)
  3. src/runtime/symtab.go

    						break
    					} else {
    						cache.inUse--
    						releasem(mp)
    						return val, pc
    					}
    				}
    			}
    		} else if debugCheckCache && (cache.inUse < 1 || cache.inUse > 2) {
    			// Catch accounting errors or deeply reentrant use. In principle
    			// "inUse" should never exceed 2.
    			throw("cache.inUse out of range")
    		}
    		cache.inUse--
    		releasem(mp)
    	}
    
    	if !f.valid() {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 40K bytes
    - Viewed (0)
  4. src/runtime/trace.go

    		mp := acquirem()
    		for _, pp := range allp[:cap(allp)] {
    			pp.trace.inSweep = false
    			pp.trace.maySweep = false
    			pp.trace.swept = 0
    			pp.trace.reclaimed = 0
    		}
    		releasem(mp)
    	}
    
    	// Release the advance semaphore. If stopTrace is true we're still holding onto
    	// traceShutdownSema.
    	//
    	// Do a direct handoff. Don't let one caller of traceAdvance starve
    	// other calls to traceAdvance.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 21:17:41 UTC 2024
    - 37.1K bytes
    - Viewed (0)
  5. src/runtime/malloc.go

    	switch {
    	case p == 0:
    		return nil, 0
    	case p&(align-1) == 0:
    		return unsafe.Pointer(p), size + align
    	case GOOS == "windows":
    		// On Windows we can't release pieces of a
    		// reservation, so we release the whole thing and
    		// re-reserve the aligned sub-region. This may race,
    		// so we may have to try again.
    		sysFreeOS(unsafe.Pointer(p), size+align)
    		p = alignUp(p, align)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 59.6K bytes
    - Viewed (0)
  6. src/runtime/traceruntime.go

    	// might end up being disabled when we load it. In that case we need to undo
    	// what we did and bail.
    	gen := trace.gen.Load()
    	if gen == 0 {
    		mp.trace.seqlock.Add(1)
    		releasem(mp)
    		return traceLocker{}
    	}
    	return traceLocker{mp, gen}
    }
    
    // traceTryAcquireEnabled is like traceAcquireEnabled but may return an invalid
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 25.7K bytes
    - Viewed (0)
  7. src/runtime/os_linux.go

    	r1, r2, errno := syscall.Syscall6(trap, a1, a2, a3, a4, a5, a6)
    	if GOARCH == "ppc64" || GOARCH == "ppc64le" {
    		// TODO(https://go.dev/issue/51192 ): ppc64 doesn't use r2.
    		r2 = 0
    	}
    	if errno != 0 {
    		releasem(getg().m)
    		allocmLock.unlock()
    		startTheWorld(stw)
    		return r1, r2, errno
    	}
    
    	perThreadSyscall = perThreadSyscallArgs{
    		trap: trap,
    		a1:   a1,
    		a2:   a2,
    		a3:   a3,
    		a4:   a4,
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 25.9K bytes
    - Viewed (0)
  8. src/runtime/runtime1.go

    // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
    
    // acquirem pins the calling goroutine to its current M by incrementing
    // the M's lock count, and returns that M so the caller can later hand it
    // to releasem. While m.locks is nonzero the goroutine will not be
    // preempted (releasem only re-arms preemption once the count reaches 0).
    // Per the note above: must be NOSPLIT and must not block.
    //
    //go:nosplit
    func acquirem() *m {
    	gp := getg()
    	// Bump the lock count on the current M; releasem decrements it.
    	gp.m.locks++
    	return gp.m
    }
    
    // releasem undoes a matching acquirem: it decrements mp.locks and, when
    // the count drops to zero with a preemption request pending on the
    // current goroutine, re-arms the stack-preemption check so the pending
    // request is not lost. Per the note above: must be NOSPLIT and must not
    // block.
    //
    //go:nosplit
    func releasem(mp *m) {
    	gp := getg()
    	mp.locks--
    	// Only once the last lock is released may the goroutine be preempted
    	// again; restore the poisoned stack guard if a request is pending.
    	if mp.locks == 0 && gp.preempt {
    		// restore the preemption request in case we've cleared it in newstack
    		gp.stackguard0 = stackPreempt
    	}
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:52:17 UTC 2024
    - 19.3K bytes
    - Viewed (0)
  9. src/runtime/mgclimit.go

    	l.gcEnabled = enableGC
    	l.transitioning = true
    	// N.B. finishGCTransition releases the lock.
    	//
    	// We don't release here to increase the chance that if there's a failure
    	// to finish the transition, that we throw on failing to acquire the lock.
    }
    
    // finishGCTransition notifies the limiter that the GC transition is complete
    // and releases ownership of it. It also accumulates STW time in the bucket.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Apr 22 22:07:41 UTC 2024
    - 17.3K bytes
    - Viewed (0)
  10. src/runtime/mprof.go

    		} else {
    			mp.profStack[1] = gp.m.curg.sched.pc
    			nstk += 1 + fpTracebackPCs(unsafe.Pointer(gp.m.curg.sched.bp), mp.profStack[2:])
    		}
    	}
    
    	saveBlockEventStack(cycles, rate, mp.profStack[:nstk], which)
    	releasem(mp)
    }
    
    // lockTimer assists with profiling contention on runtime-internal locks.
    //
    // There are several steps between the time that an M experiences contention and
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 53.3K bytes
    - Viewed (0)
Back to top