- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 13 for "wraparound" (0.16 sec)
-
src/runtime/profbuf.go
// "Effective" offsets means the total number of reads or writes, mod 2^length. // The offset in the buffer is the effective offset mod the length of the buffer. // To make wraparound mod 2^length match wraparound mod length of the buffer, // the length of the buffer must be a power of two. // // If the reader catches up to the writer, a flag passed to read controls
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 18.2K bytes - Viewed (0) -
test/chan/select2.go
alloc := memstats.Alloc // second time shouldn't increase footprint by much go sender(c, 100000) receiver(c, dummy, 100000) runtime.GC() runtime.ReadMemStats(memstats) // Be careful to avoid wraparound. if memstats.Alloc > alloc && memstats.Alloc-alloc > 1.1e5 { println("BUG: too much memory for 100,000 selects:", memstats.Alloc-alloc) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 02 13:43:18 UTC 2016 - 1K bytes - Viewed (0) -
src/compress/flate/writer_test.go
w.Close() for ; offset <= 256; offset *= 2 { w, err := NewWriter(io.Discard, level) if err != nil { t.Fatalf("NewWriter: level %d: %v", level, err) } // Reset until we are right before the wraparound. // Each reset adds maxMatchOffset to the offset. for i := 0; i < (bufferReset-len(in)-offset-maxMatchOffset)/maxMatchOffset; i++ { // skip ahead to where we are close to wrap around... w.d.reset(nil) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Oct 20 18:41:18 UTC 2020 - 5.4K bytes - Viewed (0) -
src/compress/flate/deflatefast.go
func (e *deflateFast) reset() { e.prev = e.prev[:0] // Bump the offset, so all matches will fail distance check. // Nothing should be >= e.cur in the table. e.cur += maxMatchOffset // Protect against e.cur wraparound. if e.cur >= bufferReset { e.shiftOffsets() } } // shiftOffsets will shift down all match offset. // This is only called in rare situations to prevent integer overflow. //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Oct 19 18:48:17 UTC 2020 - 9.4K bytes - Viewed (0) -
src/net/net_fake.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Feb 27 19:24:21 UTC 2024 - 26.4K bytes - Viewed (0) -
src/compress/flate/deflate_test.go
wantSecondTokens := len(enc.encode(nil, testData)) if wantFirstTokens <= wantSecondTokens { t.Fatalf("test needs matches between inputs to be generated") } // Forward the current indicator to before wraparound. enc.cur = bufferReset - int32(len(testData)) // Part 1 before wrap, should match clean state. got := len(enc.encode(nil, testData)) if wantFirstTokens != got {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Jun 14 00:03:57 UTC 2023 - 25.6K bytes - Viewed (0) -
src/cmd/internal/obj/wasm/wasmobj.go
p = appendp(p, AI32Const, constAddr(framesize-abi.StackSmall)) p = appendp(p, AI32Add) p = appendp(p, AI32LeU) } // TODO(neelance): handle wraparound case p = appendp(p, AIf) // This CALL does *not* have a resume point after it // (we already inserted all of the resume points). As // a result, morestack will resume at the *previous*
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Jun 14 00:03:57 UTC 2023 - 34.6K bytes - Viewed (0) -
src/crypto/tls/conn.go
if hc.seq[i] != 0 { return } } // Not allowed to let sequence number wrap. // Instead, must renegotiate before it does. // Not likely enough to bother. panic("TLS: sequence number wraparound") } // explicitNonceLen returns the number of bytes of explicit nonce or IV included // in each record. Explicit nonces are present only in CBC modes after TLS 1.0 // and in certain AEAD modes in TLS 1.2.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 03:10:12 UTC 2024 - 51.8K bytes - Viewed (0) -
src/runtime/mgc.go
return false } lastgc := int64(atomic.Load64(&memstats.last_gc_nanotime)) return lastgc != 0 && t.now-lastgc > forcegcperiod case gcTriggerCycle: // t.n > work.cycles, but accounting for wraparound. return int32(t.n-work.cycles.Load()) > 0 } return true } // gcStart starts the GC. It transitions from _GCoff to _GCmark (if // debug.gcstoptheworld == 0) or performs all of GC (if
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 62K bytes - Viewed (0) -
src/runtime/mprof.go
// increment increases the cycle count by one, wrapping the value at // mProfCycleWrap. It clears the flushed flag. func (c *mProfCycleHolder) increment() { // We explicitly wrap mProfCycle rather than depending on // uint wraparound because the memRecord.future ring does not // itself wrap at a power of two. for { prev := c.value.Load() cycle := prev >> 1 cycle = (cycle + 1) % mProfCycleWrap next := cycle << 1
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:57:37 UTC 2024 - 53.3K bytes - Viewed (0)