Results 21 - 30 of 48 for vSlots (1.3 sec)
test/fixedbugs/issue29312.go
// license that can be found in the LICENSE file.

// This test is not for a fix of 29312 proper, but for the patch that
// makes sure we at least don't have a security hole because of 29312.
//
// This code generates lots of types. The binary should contain
// a runtime.slicetype for each of the following 253 types:
//
// []*pwn
// [][]*pwn
// ...
// [][]...[][]*pwn - 249 total "[]"
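The nesting pattern the test describes can be reproduced outside the compiler with reflect.SliceOf, which materializes a distinct runtime type descriptor per wrapping level. A minimal sketch (not the test itself; the pwn name is borrowed from the comment above, the depth here is shortened):

    package main

    import (
        "fmt"
        "reflect"
    )

    type pwn struct{}

    func main() {
        // Build []*pwn, [][]*pwn, ... by wrapping the element type repeatedly.
        // Each reflect.SliceOf call yields a new slice type at runtime.
        t := reflect.PointerTo(reflect.TypeOf(pwn{}))
        for i := 0; i < 5; i++ { // the test goes to 249 levels of "[]"
            t = reflect.SliceOf(t)
            fmt.Println(t) // []*main.pwn, [][]*main.pwn, ...
        }
    }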
src/internal/types/testdata/fixedbugs/issue50929.go
func _() {
	// TODO(gri) only report one error below (issue #50932)
	var x F /* ERROR "not enough type arguments for type F: have 1, want 2" */ [int]
	G(x /* ERROR "does not match" */)
}

// test case from issue
// (lots of errors but doesn't crash anymore)

type RC[G any, RG any] interface {
	~[]RG
}

type RG[G any] struct{}

type RSC[G any] []*RG[G]

type M[Rc RC[G, RG], G any, RG any] struct {
	Fn func(Rc)
}
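For readers unfamiliar with the ~[]RG constraint form used above: a tilde element admits any type whose underlying type matches. A small runnable sketch of that mechanism (names here are illustrative, not from the test file):

    package main

    import "fmt"

    // SliceConstraint is satisfied by any type whose underlying type is []E.
    type SliceConstraint[E any] interface{ ~[]E }

    type Ints []int // named type, underlying type []int

    func sum[S SliceConstraint[int]](s S) (total int) {
        for _, v := range s {
            total += v
        }
        return total
    }

    func main() {
        fmt.Println(sum(Ints{1, 2, 3})) // 6: Ints satisfies ~[]int
    }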
src/runtime/tracetype.go
// The maximum number of bytes required to hold the encoded type.
maxBytes := 1 + 5*traceBytesPerNumber + len(typName)

// Estimate the size of this record. This
// bound is pretty loose, but avoids counting
// lots of varint sizes.
//
// Add 1 because we might also write a traceAllocFreeTypesBatch byte.
var flushed bool
w, flushed = w.ensure(1 + maxBytes)
if flushed {
	// Annotate the batch as containing types.
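The 5*traceBytesPerNumber term is a worst-case varint bound: rather than computing each number's exact encoded size, the writer reserves the maximum a varint can occupy. The standard library exposes the same idea; a sketch using encoding/binary (unrelated to the runtime's internal trace writer):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // Reserve the loose upper bound instead of summing exact varint sizes.
        const numbersPerRecord = 5
        maxBytes := 1 + numbersPerRecord*binary.MaxVarintLen64 + len("main.myType")
        buf := make([]byte, 0, maxBytes)

        // The actual encoding is usually much smaller than the bound.
        tmp := make([]byte, binary.MaxVarintLen64)
        n := binary.PutUvarint(tmp, 42)
        buf = append(buf, tmp[:n]...)
        fmt.Printf("reserved %d bytes, used %d so far\n", maxBytes, len(buf))
    }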
src/cmd/compile/internal/test/mergelocals_test.go
		}
	}
}

func TestMergeLocalsIntegration(t *testing.T) {
	testenv.MustHaveGoBuild(t)

	// This test does a build of a specific canned package to
	// check whether merging of stack slots is taking place.
	// The idea is to do the compile with a trace option turned
	// on and then pick up on the frame offsets of specific
	// variables.
	//
	// Stack slot merging is a greedy algorithm, and there can
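As a rough illustration of what the pass under test does (a hypothetical example, not the canned package the test builds): two large locals whose lifetimes never overlap are candidates to share a single stack slot. Whether merging actually happens depends on the compiler version and on escape analysis.

    package main

    import "fmt"

    // f's two buffers are never live at the same time, so the stack
    // slot merging pass may assign them the same frame offset.
    func f(x int) int {
        var a [256]byte
        for i := range a {
            a[i] = byte(x + i)
        }
        s := int(a[0]) + int(a[255]) // last use of a

        var b [256]byte // lifetime starts after a's ends
        for i := range b {
            b[i] = byte(s + i)
        }
        return int(b[0]) + int(b[255])
    }

    func main() { fmt.Println(f(7)) }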
src/cmd/compile/internal/test/zerorange_test.go
// ensure that output param is allocated on the heap. Also, since there is a
// defer, the pointer to each output param must be zeroed in the prologue (see
// plive.go:epilogue()). So, we will get a block of one or more stack slots that
// need to be zeroed. Hence, we are testing compilation completes successfully when
// zerorange calls of various sizes (8-136 bytes) are generated. We are not
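A hypothetical function of the shape the comment describes: a named result plus a defer that reads it, so the result must hold a valid value at every point the deferred code could observe it, and the compiler zeroes its slot in the prologue.

    package main

    import "fmt"

    // Sketch only: the named result buf is visible to the deferred
    // closure, which is the situation that triggers the prologue
    // zeroing (zerorange) exercised by the test above.
    func f() (buf [64]byte, err error) {
        defer func() {
            fmt.Println("first byte on exit:", buf[0])
        }()
        buf[0] = 1
        return buf, nil
    }

    func main() {
        b, _ := f()
        _ = b
    }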
src/internal/coverage/cfile/apis.go
}

// Implementation note: this function would be faster and simpler
// if we could just zero out the entire counter array, but for the
// moment we go through and zero out just the slots in the array
// corresponding to the counter values. We do this to avoid the
// following bad scenario: suppose that a user builds their Go
// program with "-cover", and that program has a function (call it
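A simplified model of the selective zeroing described above (the real layout in internal/coverage is more involved; funcRegion here is an assumed stand-in type): only the counter slots are cleared, leaving other words in the array untouched.

    package main

    import "fmt"

    // funcRegion is a hypothetical descriptor: where one function's
    // counters live within the shared counter array.
    type funcRegion struct {
        start, len int
    }

    // clearCounters zeroes just the counter slots, not the whole
    // array, mirroring the implementation note above.
    func clearCounters(counters []uint32, funcs []funcRegion) {
        for _, f := range funcs {
            for i := f.start; i < f.start+f.len; i++ {
                counters[i] = 0
            }
        }
    }

    func main() {
        counters := []uint32{0xdead, 3, 7, 0xbeef, 2} // slots 1-2 and 4 are counters
        clearCounters(counters, []funcRegion{{1, 2}, {4, 1}})
        fmt.Println(counters) // [57005 0 0 48879 0]
    }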
src/cmd/vendor/github.com/google/pprof/internal/report/stacks.go
// Alternative names to display (with decreasing lengths) to make text fit.
// Guaranteed to be non-empty.
Display []string

// Places holds the list of stack slots where this source occurs.
// In particular, if [a,b] is an element in Places,
// StackSet.Stacks[a].Sources[b] points to this source.
//
// No stack will be referenced twice in the Places slice for a given
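The [a,b] indexing convention reads as follows; a hedged sketch with simplified stand-in types (the real pprof StackSet and Stack carry more fields):

    package main

    import "fmt"

    type Source struct{ Name string }

    type Stack struct{ Sources []int } // indices into StackSet.Sources

    type StackSet struct {
        Stacks  []Stack
        Sources []Source
    }

    // resolvePlace follows one [a,b] Places entry: stack a, slot b.
    func resolvePlace(ss *StackSet, a, b int) Source {
        return ss.Sources[ss.Stacks[a].Sources[b]]
    }

    func main() {
        ss := &StackSet{
            Sources: []Source{{"main.main"}, {"runtime.mallocgc"}},
            Stacks:  []Stack{{Sources: []int{0, 1}}},
        }
        fmt.Println(resolvePlace(ss, 0, 1).Name) // runtime.mallocgc
    }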
src/internal/stringslite/strings.go
c0 := substr[0]
c1 := substr[1]
i := 0
t := len(s) - n + 1
fails := 0
for i < t {
	if s[i] != c0 {
		// IndexByte is faster than bytealg.IndexString, so use it as long as
		// we're not getting lots of false positives.
		o := IndexByte(s[i+1:t], c0)
		if o < 0 {
			return -1
		}
		i += o + 1
	}
	if s[i+1] == c1 && s[i:i+n] == substr {
		return i
	}
	fails++
	i++
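A self-contained version of the same technique (a hedged reconstruction using strings.IndexByte in place of the internal helper): match the first byte cheaply, confirm the second byte, then compare the whole substring. The real code also uses the fails counter to switch algorithms when false positives pile up, which this sketch omits.

    package main

    import (
        "fmt"
        "strings"
    )

    func index(s, substr string) int {
        n := len(substr)
        if n < 2 || n > len(s) {
            return strings.Index(s, substr) // fall back for the edge cases
        }
        c0, c1 := substr[0], substr[1]
        i, t := 0, len(s)-n+1
        for i < t {
            if s[i] != c0 {
                // Let IndexByte skip ahead to the next candidate.
                o := strings.IndexByte(s[i+1:t], c0)
                if o < 0 {
                    return -1
                }
                i += o + 1
            }
            if s[i+1] == c1 && s[i:i+n] == substr {
                return i
            }
            i++
        }
        return -1
    }

    func main() {
        fmt.Println(index("hello gopher", "gopher")) // 6
    }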
src/runtime/pprof/vminfo_darwin_test.go
t.Logf("vmmap output: %s", out) if ee, ok := cmdErr.(*exec.ExitError); ok && len(ee.Stderr) > 0 { t.Logf("%v: %v\n%s", cmd, cmdErr, ee.Stderr) if testing.Short() && strings.Contains(string(ee.Stderr), "No process corpse slots currently available, waiting to get one") { t.Skipf("Skipping knwn flake in short test mode") } retryable = bytes.Contains(ee.Stderr, []byte("resource shortage")) } t.Logf("%v: %v\n", cmd, cmdErr)
src/cmd/compile/internal/ssa/tighten.go
				b.Values[last] = nil
				b.Values = b.Values[:last]
				changed = true
				i--
			}
		}
	}
}

// phiTighten moves constants closer to phi users.
// This pass avoids having lots of constants live for lots of the program.
// See issue 16407.
func phiTighten(f *Func) {
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			if v.Op != OpPhi {
				continue
			}
			for i, a := range v.Args {
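A toy model of what phiTighten does, outside the compiler (the real pass works on *ssa.Func; these structs are simplified stand-ins): a constant feeding a phi is copied into the matching predecessor block, so the original constant need not stay live across the whole function.

    package main

    import "fmt"

    type Value struct {
        Op    string // "Phi", "Const64", ...
        Args  []*Value
        Block string // name of the owning block
    }

    type Block struct {
        Name   string
        Preds  []string // predecessor names, aligned with phi args
        Values []*Value
    }

    // tightenPhiConsts copies constant phi arguments into the
    // predecessor they flow in from, mirroring the pass above.
    func tightenPhiConsts(blocks map[string]*Block) {
        for _, b := range blocks {
            for _, v := range b.Values {
                if v.Op != "Phi" {
                    continue
                }
                for i, a := range v.Args {
                    if a.Op == "Const64" && a.Block != b.Preds[i] {
                        c := &Value{Op: "Const64", Block: b.Preds[i]}
                        blocks[b.Preds[i]].Values = append(blocks[b.Preds[i]].Values, c)
                        v.Args[i] = c // the phi now uses the nearby copy
                    }
                }
            }
        }
    }

    func main() {
        entry := &Block{Name: "entry"}
        left := &Block{Name: "left"}
        right := &Block{Name: "right"}
        merge := &Block{Name: "merge", Preds: []string{"left", "right"}}

        k := &Value{Op: "Const64", Block: "entry"} // defined far from its use
        phi := &Value{Op: "Phi", Args: []*Value{k, k}, Block: "merge"}
        merge.Values = append(merge.Values, phi)

        blocks := map[string]*Block{"entry": entry, "left": left, "right": right, "merge": merge}
        tightenPhiConsts(blocks)
        fmt.Println(phi.Args[0].Block, phi.Args[1].Block) // left right
    }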