- Sort Score
- Results per page: 10
- Languages All
Results 51 - 60 of 521 for nbits (0.06 sec)
-
src/index/suffixarray/suffixarray_test.go
name = fmt.Sprintf("%dM", size/1e6) } b.Run("size="+name, func(b *testing.B) { for _, bits := range []int{32, 64} { if ^uint(0) == 0xffffffff && bits == 64 { continue } b.Run(fmt.Sprintf("bits=%d", bits), func(b *testing.B) { cleanup := setBits(bits) defer cleanup() b.SetBytes(int64(len(data))) b.ReportAllocs()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 14.1K bytes - Viewed (0) -
src/testing/testing_windows.go
delta := a.now - b.now if queryPerformanceFrequency == 0 { queryPerformanceFrequency = windows.QueryPerformanceFrequency() } hi, lo := bits.Mul64(uint64(delta), uint64(time.Second)/uint64(time.Nanosecond)) quo, _ := bits.Div64(hi, lo, uint64(queryPerformanceFrequency)) return time.Duration(quo) } var queryPerformanceFrequency int64 // highPrecisionTimeSince returns duration since a.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 26 22:55:25 UTC 2024 - 1.9K bytes - Viewed (0) -
src/runtime/hash_test.go
// There's not enough bits in the hash output, so we // expect a nontrivial number of collisions, and it is // often quite a bit higher than expected. See issue 43130. t.Skip("Flaky on 32-bit systems") } if testing.Short() { t.Skip("Skipping in short mode") } const BITS = 16 for r := 0; r < k.bits(); r++ { for i := 0; i < 1<<BITS; i++ { k.clear()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 06 17:50:18 UTC 2024 - 18.4K bytes - Viewed (0) -
src/hash/maphash/smhasher_test.go
t.Skip("Too slow on wasm") } if testing.Short() { t.Skip("Skipping in short mode") } const BITS = 16 h := newHashSet() for r := 0; r < k.bits(); r++ { for i := 0; i < 1<<BITS; i++ { k.clear() for j := 0; j < BITS; j++ { if i>>uint(j)&1 != 0 { k.flipBit((j + r) % k.bits()) } } h.add(k.hash()) } h.check(t) } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 03 16:41:38 UTC 2024 - 11K bytes - Viewed (0) -
src/runtime/mgcscavenge_test.go
// Check hugepage preserving behavior. bits := uint(PhysHugePageSize / uintptr(PageSize)) if bits < PallocChunkPages { tests["PreserveHugePageBottom"] = test{ alloc: []BitRange{{bits + 2, PallocChunkPages - (bits + 2)}}, min: 1, max: 3, // Make it so that max would have us try to break the huge page. want: BitRange{0, bits + 2}, } if 3*bits < PallocChunkPages {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 25.2K bytes - Viewed (0) -
src/compress/bzip2/huffman.go
const invalidNodeValue = 0xffff // Decode reads bits from the given bitReader and navigates the tree until a // symbol is found. func (t *huffmanTree) Decode(br *bitReader) (v uint16) { nodeIndex := uint16(0) // node 0 is the root of the tree. for { node := &t.nodes[nodeIndex] var bit uint16 if br.bits > 0 { // Get next bit - fast path. br.bits-- bit = uint16(br.n>>(br.bits&63)) & 1 } else {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 15 17:44:37 UTC 2024 - 6.7K bytes - Viewed (0) -
android/guava/src/com/google/common/math/BigIntegerMath.java
long numeratorAccum = n; long denominatorAccum = 1; int bits = LongMath.log2(n, CEILING); int numeratorBits = bits; for (int i = 1; i < k; i++) { int p = n - i; int q = i + 1; // log2(p) >= bits - 1, because p >= n/2 if (numeratorBits + bits >= Long.SIZE - 1) { // The numerator is as big as it can get without risking overflow.
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Wed Feb 07 17:50:39 UTC 2024 - 18.9K bytes - Viewed (0) -
src/crypto/md5/gen.go
{{end}} // round 1 {{range $i, $s := dup 4 .Shift1 -}} {{printf "arg0 = arg1 + bits.RotateLeft32((((arg2^arg3)&arg1)^arg3)+arg0+x%x+%#08x, %d)" (idx 1 $i) (index $.Table1 $i) $s | relabel}} {{rotate -}} {{end}} // round 2 {{range $i, $s := dup 4 .Shift2 -}} {{printf "arg0 = arg1 + bits.RotateLeft32((((arg1^arg2)&arg3)^arg2)+arg0+x%x+%#08x, %d)" (idx 2 $i) (index $.Table2 $i) $s | relabel}}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 13 18:57:38 UTC 2024 - 4.7K bytes - Viewed (0) -
src/runtime/histogram.go
// // 011000001 // ^-- // │ ^ // │ └---- Next 2 bits -> sub-bucket 3 // └------- Bit 9 unset -> bucket 0 // // 110000001 // ^-- // │ ^ // │ └---- Next 2 bits -> sub-bucket 2 // └------- Bit 9 set -> bucket 1 // // 1000000010 // ^-- ^ // │ ^ └-- Lower bits ignored // │ └---- Next 2 bits -> sub-bucket 0 // └------- Bit 10 set -> bucket 2 //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 7.3K bytes - Viewed (0) -
src/runtime/arena.go
if h.valid+valid <= ptrBits { // Fast path - just accumulate the bits. h.mask |= bits << h.valid h.valid += valid return h } // Too many bits to fit in this word. Write the current word // out and move on to the next word. data := h.mask | bits<<h.valid // mask for this word h.mask = bits >> (ptrBits - h.valid) // leftover for next word
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:44:56 UTC 2024 - 37.9K bytes - Viewed (0)