- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 112 for "flushed" (0.17 sec)
-
src/runtime/tracestring.go
s = s[:maxTraceStringLen] } lock(&t.lock) w := unsafeTraceWriter(gen, t.buf) // Ensure we have a place to write to. var flushed bool w, flushed = w.ensure(2 + 2*traceBytesPerNumber + len(s) /* traceEvStrings + traceEvString + ID + len + string data */) if flushed { // Annotate the batch as containing strings. w.byte(byte(traceEvStrings)) } // Write out the string. w.byte(byte(traceEvString))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 15 17:03:35 UTC 2024 - 2.4K bytes - Viewed (0) -
src/runtime/mgcwork.go
bytesMarked uint64 // Heap scan work performed on this gcWork. This is aggregated into // gcController by dispose and may also be flushed by callers. // Other types of scan work are flushed immediately. heapScanWork int64 // flushedWork indicates that a non-empty work buffer was // flushed to the global work list since the last gcMarkDone // termination check. Specifically, this indicates that this
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 12.9K bytes - Viewed (0) -
src/runtime/tracetype.go
// bound is pretty loose, but avoids counting // lots of varint sizes. // // Add 1 because we might also write a traceAllocFreeTypesBatch byte. var flushed bool w, flushed = w.ensure(1 + maxBytes) if flushed { // Annotate the batch as containing types. w.byte(byte(traceAllocFreeTypesBatch)) } // Emit type. w.varint(uint64(node.id)) w.varint(uint64(uintptr(unsafe.Pointer(typ))))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 20:31:27 UTC 2024 - 2.3K bytes - Viewed (0) -
src/runtime/traceallocfree.go
func traceSnapshotMemory(gen uintptr) { assertWorldStopped() // Write a batch containing information that'll be necessary to // interpret the events. var flushed bool w := unsafeTraceExpWriter(gen, nil, traceExperimentAllocFree) w, flushed = w.ensure(1 + 4*traceBytesPerNumber) if flushed { // Annotate the batch as containing additional info. w.byte(byte(traceAllocFreeInfoBatch)) } // Emit info.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 20:32:51 UTC 2024 - 5.9K bytes - Viewed (0) -
src/runtime/tracecpu.go
} // Write out a trace event. w := unsafeTraceWriter(gen, trace.cpuBuf[gen%2]) // Ensure we have a place to write to. var flushed bool w, flushed = w.ensure(2 + 5*traceBytesPerNumber /* traceEvCPUSamples + traceEvCPUSample + timestamp + g + m + p + stack ID */) if flushed { // Annotate the batch as containing strings. w.byte(byte(traceEvCPUSamples)) } // Add the stack to the table.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 15 17:03:35 UTC 2024 - 8.7K bytes - Viewed (0) -
src/runtime/traceexp.go
return traceExpWriter{traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf}, exp} } // ensure makes sure that at least maxSize bytes are available to write. // // Returns whether the buffer was flushed. func (w traceExpWriter) ensure(maxSize int) (traceExpWriter, bool) { refill := w.traceBuf == nil || !w.available(maxSize) if refill { w.traceWriter = w.traceWriter.refill(w.exp) } return w, refill }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:47:01 UTC 2024 - 2.4K bytes - Viewed (0) -
src/compress/gzip/gzip.go
n, z.err = z.compressor.Write(p) return n, z.err } // Flush flushes any pending compressed data to the underlying writer. // // It is useful mainly in compressed network protocols, to ensure that // a remote reader has enough data to reconstruct a packet. Flush does // not return until the data has been written. If the underlying // writer returns an error, Flush returns that error. //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 26 13:32:40 UTC 2024 - 6.2K bytes - Viewed (0) -
src/runtime/tracestack.go
// Estimate the size of this record. This // bound is pretty loose, but avoids counting // lots of varint sizes. // // Add 1 because we might also write traceEvStacks. var flushed bool w, flushed = w.ensure(1 + maxBytes) if flushed { w.byte(byte(traceEvStacks)) } // Emit stack event. w.byte(byte(traceEvStack)) w.varint(uint64(node.id)) w.varint(uint64(len(frames)))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 21 14:38:56 UTC 2024 - 11K bytes - Viewed (0) -
src/runtime/mcache.go
// to avoid spilling mark bits into the *next* GC cycle. sg := mheap_.sweepgen flushGen := c.flushGen.Load() if flushGen == sg { return } else if flushGen != sg-2 { println("bad flushGen", flushGen, "in prepareForSweep; sweepgen", sg) throw("bad flushGen") } c.releaseAll() stackcache_clear(c) c.flushGen.Store(mheap_.sweepgen) // Synchronizes with gcStart
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 10K bytes - Viewed (0) -
src/text/tabwriter/tabwriter.go
// complete for formatting purposes. func (b *Writer) Flush() error { return b.flush() } // flush is the internal version of Flush, with a named return value which we // don't want to expose. func (b *Writer) flush() (err error) { defer b.handlePanic(&err, "Flush") b.flushNoDefers() return nil } // flushNoDefers is like flush, but without a deferred handlePanic call. This
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Feb 29 16:46:34 UTC 2024 - 17.8K bytes - Viewed (0)