Results 1 - 10 of 10 for traceRelease (0.33 sec)

  1. src/runtime/traceruntime.go

    //
    //go:nosplit
    func (tl traceLocker) ok() bool {
    	return tl.gen != 0
    }
    
    // traceRelease indicates that this M is done writing trace events.
    //
    // nosplit because it's called on the syscall path when stack movement is forbidden.
    //
    //go:nosplit
    func traceRelease(tl traceLocker) {
    	seq := tl.mp.trace.seqlock.Add(1)
    	if debugTraceReentrancy && seq%2 != 0 {
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 25.7K bytes
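
    Results 2 through 4 below show the matching acquire side of this API. Condensed, the call-site pairing used throughout the runtime (via traceAcquire or traceTryAcquire) looks like the following sketch, assembled from those results:

    	trace := traceAcquire()
    	if trace.ok() {
    		// ... emit events on trace, e.g. trace.GCSweepSpan(...) ...
    		traceRelease(trace)
    	}
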
  2. src/runtime/trace.go

    		tl.Gomaxprocs(gomaxprocs)
    		traceRelease(tl)
    	}
    
    	// Emit a GCActive event in the new generation if necessary.
    	//
    	// It's important that we do this before allowing stop-the-worlds again,
    	// because that could emit global GC-related events.
    	if !stopTrace && (gcphase == _GCmark || gcphase == _GCmarktermination) {
    		tl := traceAcquire()
    		tl.GCActive()
    		traceRelease(tl)
    	}
    
    - Last Modified: Wed May 22 21:17:41 UTC 2024
    - 37.1K bytes
  3. src/runtime/mgcsweep.go

    		throw("mspan.sweep: bad span state")
    	}
    
    	trace := traceAcquire()
    	if trace.ok() {
    		trace.GCSweepSpan(s.npages * _PageSize)
    		traceRelease(trace)
    	}
    
    	mheap_.pagesSwept.Add(int64(s.npages))
    
    	spc := s.spanclass
    	size := s.elemsize
    
    	// The allocBits indicate which unmarked objects don't need to be
    - Last Modified: Wed May 08 17:52:18 UTC 2024
    - 32.9K bytes
  4. src/runtime/stack.go

    			s.elemsize = uintptr(n)
    		}
    		v = unsafe.Pointer(s.base())
    	}
    
    	if traceAllocFreeEnabled() {
    		trace := traceTryAcquire()
    		if trace.ok() {
    			trace.GoroutineStackAlloc(uintptr(v), uintptr(n))
    			traceRelease(trace)
    		}
    	}
    	if raceenabled {
    		racemalloc(v, uintptr(n))
    	}
    	if msanenabled {
    		msanmalloc(v, uintptr(n))
    	}
    	if asanenabled {
    		asanunpoison(v, uintptr(n))
    	}
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 41.1K bytes
  5. src/runtime/race.go

    // In terms of the C memory model, RaceRelease is equivalent to
    // atomic_store(memory_order_release).
    //
    //go:nosplit
    func RaceRelease(addr unsafe.Pointer) {
    	racerelease(addr)
    }
    
    // RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
    // relation with the preceding RaceRelease or RaceReleaseMerge on addr.
    //
    // In terms of the C memory model, RaceReleaseMerge is equivalent to
    // atomic_exchange(memory_order_release).
    //
    - Last Modified: Fri May 17 18:37:29 UTC 2024
    - 20.4K bytes
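
    A minimal, hypothetical sketch of the exported annotations above in use outside the runtime: two goroutines hand a value off through an OS pipe, and the handoff is annotated with RaceRelease / RaceAcquire so the race detector is told about the resulting happens-before edge. The program and its names are illustrative rather than taken from these results, and it assumes a build with the race detector enabled:

    //go:build race

    // Illustrative only: the pipe stands in for a synchronization mechanism
    // that the detector may not model on its own; the annotations add the
    // happens-before edge explicitly. Run with: go run -race .
    package main

    import (
    	"os"
    	"runtime"
    	"unsafe"
    )

    var payload int

    func main() {
    	r, w, _ := os.Pipe()
    	done := make(chan struct{})

    	go func() {
    		payload = 42
    		// Everything written before this point happens before a later
    		// RaceAcquire on the same address (per the doc comment above).
    		runtime.RaceRelease(unsafe.Pointer(&payload))
    		w.Write([]byte{1}) // wake the reader through the kernel
    		close(done)
    	}()

    	buf := make([]byte, 1)
    	r.Read(buf) // blocks until the writer's byte arrives
    	runtime.RaceAcquire(unsafe.Pointer(&payload))
    	println(payload) // the pipe ordered the accesses; the edge is now visible to the detector
    	<-done
    }

    RaceReleaseMerge would replace RaceRelease when successive releases on the same address should all chain into the acquiring side, as its doc comment above notes.
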
  6. src/runtime/chan.go

    		unlock(&c.lock)
    		panic(plainError("close of closed channel"))
    	}
    
    	if raceenabled {
    		callerpc := getcallerpc()
    		racewritepc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(closechan))
    		racerelease(c.raceaddr())
    	}
    
    	c.closed = 1
    
    	var glist gList
    
    	// release all readers
    	for {
    		sg := c.recvq.dequeue()
    		if sg == nil {
    			break
    		}
    		if sg.elem != nil {
    - Last Modified: Thu May 23 01:16:50 UTC 2024
    - 25.9K bytes
  7. src/runtime/type.go

    }
    
    func reflectOffsLock() {
    	lock(&reflectOffs.lock)
    	if raceenabled {
    		raceacquire(unsafe.Pointer(&reflectOffs.lock))
    	}
    }
    
    func reflectOffsUnlock() {
    	if raceenabled {
    		racerelease(unsafe.Pointer(&reflectOffs.lock))
    	}
    	unlock(&reflectOffs.lock)
    }
    
    // resolveNameOff should be an internal detail,
    // but widely used packages access it using linkname.
    - Last Modified: Thu May 23 01:17:26 UTC 2024
    - 12.7K bytes
  8. src/runtime/syscall_windows.go

    	// until then.
    	if raceenabled && mainStarted {
    		raceacquire(unsafe.Pointer(&cbs.lock))
    	}
    }
    
    func cbsUnlock() {
    	if raceenabled && mainStarted {
    		racerelease(unsafe.Pointer(&cbs.lock))
    	}
    	unlock(&cbs.lock)
    }
    
    // winCallback records information about a registered Go callback.
    type winCallback struct {
    	fn     *funcval // Go function
    - Last Modified: Wed May 22 20:12:46 UTC 2024
    - 16.6K bytes
  9. src/runtime/metrics.go

    	semacquire1(&metricsSema, true, 0, 0, waitReasonSemacquire)
    	if raceenabled {
    		raceacquire(unsafe.Pointer(&metricsSema))
    	}
    }
    
    func metricsUnlock() {
    	if raceenabled {
    		racerelease(unsafe.Pointer(&metricsSema))
    	}
    	semrelease(&metricsSema)
    }
    
    // initMetrics initializes the metrics map if it hasn't been yet.
    //
    // metricsSema must be held.
    func initMetrics() {
    	if metricsInit {
    - Last Modified: Mon Apr 08 21:03:13 UTC 2024
    - 26K bytes
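
    Results 7 through 9 follow the same runtime-internal convention for a lock whose critical sections the race detector should understand: raceacquire immediately after the lock is taken, racerelease immediately before it is dropped, both guarded by raceenabled. Condensed into a sketch with a hypothetical lock name (foo), using only the primitives visible in those results:

    func fooLock() {
    	lock(&foo.lock)
    	if raceenabled {
    		// Pair with the racerelease in fooUnlock so the detector sees the
    		// happens-before edge that the runtime lock itself provides.
    		raceacquire(unsafe.Pointer(&foo.lock))
    	}
    }

    func fooUnlock() {
    	if raceenabled {
    		racerelease(unsafe.Pointer(&foo.lock))
    	}
    	unlock(&foo.lock)
    }
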
  10. src/runtime/time.go

    //go:linkname newTimer time.newTimer
    func newTimer(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, c *hchan) *timeTimer {
    	t := new(timeTimer)
    	t.timer.init(nil, nil)
    	t.trace("new")
    	if raceenabled {
    		racerelease(unsafe.Pointer(&t.timer))
    	}
    	if c != nil {
    		lockInit(&t.sendLock, lockRankTimerSend)
    		t.isChan = true
    		c.timer = &t.timer
    		if c.dataqsiz == 0 {
    			throw("invalid timer channel: no capacity")
    		}
    - Last Modified: Fri Mar 29 14:36:24 UTC 2024
    - 37.5K bytes