- Sort Score
- Results per page: 10
- Languages All
Results 51 - 60 of 3,585 for room (0.27 sec)
-
src/encoding/ascii85/ascii85.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 08 19:04:28 UTC 2023 - 6.9K bytes - Viewed (0) -
src/runtime/cpuprof.go
// sample rate, at a cost of 1 MiB. profBufWordCount = 1 << 17 // profBufTagCount is the size of the CPU profile buffer's storage for the // goroutine tags associated with each sample. A capacity of 1<<14 means // room for 16k samples, or 160 thread-seconds at a 100 Hz sample rate. profBufTagCount = 1 << 14 ) type cpuProfile struct { lock mutex on bool // profiling is on log *profBuf // profile events written here
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 8.5K bytes - Viewed (0) -
src/runtime/debug_test.go
// The biggest risk is having a write barrier in the debug call // injection test code fire, because it runs in a signal handler // and may not have a P. // // We use 8 Ps so there's room for the debug call worker, // something that's trying to preempt the call worker, and the // goroutine that's trying to stop the call worker. ogomaxprocs := runtime.GOMAXPROCS(8) ogcpercent := debug.SetGCPercent(-1)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 08 15:08:04 UTC 2023 - 8K bytes - Viewed (0) -
src/runtime/traceevent.go
func (e traceEventWriter) end() { e.w.end() } // traceEventWrite is the part of traceEvent that actually writes the event. func (w traceWriter) event(ev traceEv, args ...traceArg) traceWriter { // Make sure we have room. w, _ = w.ensure(1 + (len(args)+1)*traceBytesPerNumber) // Compute the timestamp diff that we'll put in the trace. ts := traceClockNow() if ts <= w.traceBuf.lastTime { ts = w.traceBuf.lastTime + 1 }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:47:01 UTC 2024 - 9.2K bytes - Viewed (0) -
staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go
m := strings.Split(u, "/")[0] buf := bytes.NewBuffer(nil) for _, r := range m { // Ignore non-printable characters if !unicode.IsPrint(r) { continue } // Only append if we have room for it if buf.Len()+utf8.RuneLen(r) > validation.FieldManagerMaxLength { break } buf.WriteRune(r) } return buf.String()
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon May 01 20:19:46 UTC 2023 - 10.1K bytes - Viewed (0) -
src/compress/lzw/reader.go
// written right-to-left from the end of the buffer before being copied // to the start of the buffer. // It is flushed when it contains >= 1<<maxWidth bytes, // so that there is always room to decode an entire code. output [2 * 1 << maxWidth]byte o int // write index into output toRead []byte // bytes to return from Read } // readLSB returns the next code for "Least Significant Bits first" data.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 12 14:39:39 UTC 2023 - 8K bytes - Viewed (0) -
src/crypto/internal/edwards25519/field/fe_generic.go
// Now all coefficients fit into 64-bit registers but are still too large to // be passed around as an Element. We therefore do one last carry chain, // where the carries will be small enough to fit in the wiggle room above 2⁵¹. *v = Element{rr0, rr1, rr2, rr3, rr4} v.carryPropagate() } func feSquareGeneric(v, a *Element) { l0 := a.l0 l1 := a.l1 l2 := a.l2 l3 := a.l3 l4 := a.l4
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Sep 27 01:16:19 UTC 2023 - 8.5K bytes - Viewed (0) -
tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache.h
void MaybeFetch(const Key& key, const std::shared_ptr<Block>& block, TF_Status* status) ABSL_LOCKS_EXCLUDED(mu_); /// Trim the block cache to make room for another entry. void Trim() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_); /// Update the LRU iterator for the block at `key`. void UpdateLRU(const Key& key, const std::shared_ptr<Block>& block,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Aug 31 04:46:34 UTC 2020 - 10.6K bytes - Viewed (0) -
src/internal/fuzz/mutators_byteslice.go
b = b[:end+(n*2)] // Copy the block of bytes we want to duplicate to the end of the // slice copy(b[end+n:], b[src:src+n]) // Shift the bytes after the splice point n positions to the right // to make room for the new block copy(b[dst+n:end+n], b[dst:end]) // Insert the duplicate block into the splice point copy(b[dst:], b[end+n:]) b = b[:end+n] return b }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Nov 19 18:23:43 UTC 2021 - 7.7K bytes - Viewed (0) -
pkg/slices/slices.go
func Join(sep string, fields ...string) string { return strings.Join(fields, sep) } // Insert inserts the values v... into s at index i, // returning the modified slice. // The elements at s[i:] are shifted up to make room. // In the returned slice r, r[i] == v[0], // and r[i+len(v)] == value originally at r[i]. // Insert panics if i is out of range. // This function is O(len(s) + len(v)). func Insert[S ~[]E, E any](s S, i int, v ...E) S {
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Wed May 15 06:28:11 UTC 2024 - 7.9K bytes - Viewed (0)