- Sort Score
- Results per page: 10
- Languages All
Results 51 - 60 of 107 for Stack (0.04 sec)
-
src/cmd/covdata/covdata.go
"os" "runtime" "runtime/pprof" "strings" ) var verbflag = flag.Int("v", 0, "Verbose trace output level") var hflag = flag.Bool("h", false, "Panic on fatal errors (for stack trace)") var hwflag = flag.Bool("hw", false, "Panic on warnings (for stack trace)") var indirsflag = flag.String("i", "", "Input dirs to examine (comma separated)") var pkgpatflag = flag.String("pkg", "", "Restrict output to package(s) matching specified package pattern.")
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 14 19:41:17 UTC 2024 - 5.7K bytes - Viewed (0) -
src/io/fs/glob_test.go
if err != path.ErrBadPattern { t.Errorf("Glob(fs, %#q) returned err=%v, want path.ErrBadPattern", pattern, err) } } } func TestCVE202230630(t *testing.T) { // Prior to CVE-2022-30630, a stack exhaustion would occur given a large // number of separators. There is now a limit of 10,000. _, err := Glob(os.DirFS("."), "/*"+strings.Repeat("/", 10001)) if err != path.ErrBadPattern {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 07 18:36:52 UTC 2024 - 2.3K bytes - Viewed (0) -
src/cmd/trace/procgen.go
gs.augmentName(st.Stack) // Handle the goroutine state transition. from, to := st.Goroutine() if from == to { // Filter out no-op events. return } if from == trace.GoRunning && !to.Executing() { if to == trace.GoWaiting { // Goroutine started blocking. gs.block(ev.Time(), ev.Stack(), st.Reason, ctx) } else { gs.stop(ev.Time(), ev.Stack(), ctx) } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 6.1K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/telemetry/counter/doc.go
// There are two kinds of counters, basic counters and stack counters. // Basic counters are created by [New]. // Stack counters are created by [NewStack]. // Both are incremented by calling Inc(). // // Basic counters are very cheap. Stack counters are more expensive, as they // require parsing the stack. (Stack counters are implemented as basic counters // whose names are the concatenation of the name and the stack trace. There is
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:10:54 UTC 2024 - 2.4K bytes - Viewed (0) -
src/runtime/defs_dragonfly_amd64.go
_EVFILT_USER = -0x9 _NOTE_TRIGGER = 0x1000000 ) type rtprio struct { _type uint16 prio uint16 } type lwpparams struct { start_func uintptr arg unsafe.Pointer stack uintptr tid1 unsafe.Pointer // *int32 tid2 unsafe.Pointer // *int32 } type sigset struct { __bits [4]uint32 } type stackt struct { ss_sp uintptr ss_size uintptr
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 12 21:17:22 UTC 2024 - 3.5K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go
} // restOfBlock, given a traversal stack, finds the innermost containing // block and returns the suffix of its statements starting with the current // node, along with the number of call expressions encountered. func restOfBlock(stack []ast.Node) ([]ast.Stmt, int) { var ncalls int for i := len(stack) - 1; i >= 0; i-- { if b, ok := stack[i].(*ast.BlockStmt); ok { for j, v := range b.List {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 02 02:20:05 UTC 2024 - 5K bytes - Viewed (0) -
src/reflect/makefunc.go
} t := typ.common() ftyp := (*funcType)(unsafe.Pointer(t)) code := abi.FuncPCABI0(makeFuncStub) // makeFuncImpl contains a stack map for use by the runtime _, _, abid := funcLayout(ftyp, nil) impl := &makeFuncImpl{ makeFuncCtxt: makeFuncCtxt{ fn: code, stack: abid.stackPtrs, argLen: abid.stackCallArgsSize, regPtrs: abid.inRegPtrs, }, ftyp: ftyp, fn: fn, }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 02 15:20:05 UTC 2024 - 5.9K bytes - Viewed (0) -
src/runtime/cpuprof.go
} prof.signalLock.Store(0) } // addNonGo adds the non-Go stack trace to the profile. // It is called from a non-Go thread, so we cannot use much stack at all, // nor do anything that needs a g or an m. // In particular, we can't call cpuprof.log.write. // Instead, we copy the stack into cpuprof.extra, // which will be drained the next time a Go thread // gets the signal handling event. //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 8.5K bytes - Viewed (0) -
src/cmd/compile/internal/types2/mono.go
// vertices we visited earlier cannot be part of the cycle. for stack[0] != v { stack = stack[1:] } // TODO(mdempsky): Pivot stack so we report the cycle from the top? err := check.newError(InvalidInstanceCycle) obj0 := check.mono.vertices[v].obj err.addf(obj0, "instantiation cycle:") qf := RelativeTo(check.pkg) for _, v := range stack { edge := check.mono.edges[check.mono.vertices[v].pre]
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Feb 28 00:05:29 UTC 2024 - 9.1K bytes - Viewed (0) -
src/go/ast/commentmap.go
r.end = r.fset.Position(r.comment.End()) r.index++ } } // A nodeStack keeps track of nested nodes. // A node lower on the stack lexically contains the nodes higher on the stack. type nodeStack []Node // push pops all nodes that appear lexically before n // and then pushes n on the stack. func (s *nodeStack) push(n Node) { s.pop(n.Pos()) *s = append((*s), n) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 8.9K bytes - Viewed (0)