Results 61 - 70 of 263 for stack (0.05 sec)

  1. src/runtime/os_linux.go

    // Version of newosproc that doesn't require a valid G.
    //
    //go:nosplit
    func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
    	stack := sysAlloc(stacksize, &memstats.stacks_sys)
    	if stack == nil {
    		writeErrStr(failallocatestack)
    		exit(1)
    	}
    	ret := clone(cloneFlags, unsafe.Pointer(uintptr(stack)+stacksize), nil, nil, fn)
    	if ret < 0 {
    		writeErrStr(failthreadcreate)
    		exit(1)
    	}
    }
    
    const (
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 25.9K bytes
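
    A minimal standalone sketch (hypothetical, ordinary user code rather than runtime internals) of why clone receives uintptr(stack)+stacksize: thread stacks grow downward on Linux, so the new thread's initial stack pointer is the high end of the allocated region. Ordinary code should keep such a value as a uintptr; the runtime can form the pointer directly because sysAlloc memory is not a garbage-collected Go object.

    package main
    
    import (
    	"fmt"
    	"unsafe"
    )
    
    func main() {
    	const stacksize = 64 << 10 // hypothetical 64 KiB thread stack
    	stack := make([]byte, stacksize)
    	base := uintptr(unsafe.Pointer(&stack[0]))
    	// Stacks grow downward, so a new thread starts with its stack
    	// pointer at the high end of the region, mirroring
    	// unsafe.Pointer(uintptr(stack)+stacksize) in newosproc0.
    	top := base + stacksize
    	fmt.Printf("base=%#x top=%#x (%d bytes apart)\n", base, top, top-base)
    }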
  2. src/cmd/compile/internal/ir/func.go

    	if base.Debug.Closure > 0 {
    		if clo.Esc() == EscHeap {
    			base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func.ClosureVars)
    		} else {
    			base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars)
    		}
    	}
    	if base.Flag.CompilingRuntime && clo.Esc() == EscHeap && !clo.IsGoWrap {
    - Last Modified: Thu May 23 01:05:44 UTC 2024
    - 21.1K bytes
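
    These diagnostics can be triggered from ordinary code; a small sketch, assuming base.Debug.Closure corresponds to the compiler's -d=closure=1 debug flag:

    // Build with: go build -gcflags=-d=closure=1 main.go
    // (flag spelling assumed from base.Debug.Closure above)
    package main
    
    // onStack's closure never outlives the call, so the compiler
    // reports a "stack closure".
    func onStack() int {
    	x := 1
    	f := func() int { return x + 1 }
    	return f()
    }
    
    // onHeap returns its closure, so it escapes and is reported as a
    // "heap closure" with x among the captured vars.
    func onHeap() func() int {
    	x := 1
    	return func() int { x++; return x }
    }
    
    func main() {
    	_ = onStack()
    	_ = onHeap()()
    }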
  3. src/compress/gzip/gunzip_test.go

    	z, err := NewReader(r)
    	if err != nil {
    		t.Fatalf("NewReader: got %v, want nil", err)
    	}
    	// Prior to CVE-2022-30631 fix, this would cause an unrecoverable panic due
    	// to stack exhaustion.
    	_, err = z.Read(make([]byte, 10))
    	if err != io.EOF {
    		t.Errorf("Reader.Read: got %v, want %v", err, io.EOF)
    	}
    - Last Modified: Tue Jul 12 15:06:07 UTC 2022
    - 19.5K bytes
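
    For context, a standalone sketch of the kind of input involved: gzip streams may concatenate many members, and before the CVE-2022-30631 fix a stream of very many empty members could exhaust the goroutine stack through recursion in Reader.Read. Three members stand in for the pathological count here:

    package main
    
    import (
    	"bytes"
    	"compress/gzip"
    	"fmt"
    	"io"
    )
    
    func main() {
    	// A few concatenated empty members; the original report used
    	// enough of them to overflow the stack pre-fix.
    	var buf bytes.Buffer
    	for i := 0; i < 3; i++ {
    		w := gzip.NewWriter(&buf)
    		w.Close() // writes an empty member
    	}
    	z, err := gzip.NewReader(&buf)
    	if err != nil {
    		fmt.Println("NewReader:", err)
    		return
    	}
    	if _, err := z.Read(make([]byte, 10)); err != io.EOF {
    		fmt.Println("unexpected error:", err)
    		return
    	}
    	fmt.Println("all members consumed; got io.EOF")
    }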
  4. src/compress/flate/deflate.go

    		if d.hashOffset > maxHashOffset {
    			delta := d.hashOffset - 1
    			d.hashOffset -= delta
    			d.chainHead -= delta
    
    			// Iterate over slices instead of arrays to avoid copying
    			// the entire table onto the stack (Issue #18625).
    			for i, v := range d.hashPrev[:] {
    				if int(v) > delta {
    					d.hashPrev[i] = uint32(int(v) - delta)
    				} else {
    					d.hashPrev[i] = 0
    				}
    			}
    			for i, v := range d.hashHead[:] {
    - Last Modified: Fri Apr 26 13:32:40 UTC 2024
    - 20.3K bytes
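
    The [:] in both loops is the point of the comment: ranging over the array value d.hashPrev would iterate over a copy of the whole table, while slicing it first iterates in place. A sketch with a hypothetical stand-in table:

    package main
    
    import "fmt"
    
    // rebase shifts every chain entry down by delta, zeroing entries
    // that would go negative -- the same fixup the deflate snippet
    // performs after adjusting hashOffset.
    func rebase(tab []uint32, delta int) {
    	for i, v := range tab { // tab is a slice: no array copy
    		if int(v) > delta {
    			tab[i] = uint32(int(v) - delta)
    		} else {
    			tab[i] = 0
    		}
    	}
    }
    
    func main() {
    	var hashPrev [8]uint32 // stand-in for the much larger real table
    	for i := range hashPrev {
    		hashPrev[i] = uint32(i * 3)
    	}
    	// hashPrev[:] iterates the array in place; ranging over hashPrev
    	// itself could copy the entire array onto the stack (Issue #18625).
    	rebase(hashPrev[:], 5)
    	fmt.Println(hashPrev) // [0 0 1 4 7 10 13 16]
    }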
  5. src/runtime/metrics.go

    		},
    		"/gc/pauses:seconds": {
    			compute: func(_ *statAggregate, out *metricValue) {
    				// N.B. this is identical to /sched/pauses/total/gc:seconds.
    				sched.stwTotalTimeGC.write(out)
    			},
    		},
    		"/gc/stack/starting-size:bytes": {
    			compute: func(in *statAggregate, out *metricValue) {
    				out.kind = metricKindUint64
    				out.scalar = uint64(startingStackSize)
    			},
    		},
    		"/memory/classes/heap/free:bytes": {
    - Last Modified: Mon Apr 08 21:03:13 UTC 2024
    - 26K bytes
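
    The metric defined here is readable from user code through the runtime/metrics package; a minimal sketch:

    package main
    
    import (
    	"fmt"
    	"runtime/metrics"
    )
    
    func main() {
    	// Sample the metric registered in the snippet above.
    	s := []metrics.Sample{{Name: "/gc/stack/starting-size:bytes"}}
    	metrics.Read(s)
    	if s[0].Value.Kind() == metrics.KindUint64 {
    		fmt.Printf("goroutines start with %d-byte stacks\n", s[0].Value.Uint64())
    	}
    }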
  6. src/syscall/zsysnum_netbsd_amd64.go

    	SYS_GETSID               = 286 // { pid_t|sys||getsid(pid_t pid); }
    	SYS___CLONE              = 287 // { pid_t|sys||__clone(int flags, void *stack); }
    	SYS_FKTRACE              = 288 // { int|sys||fktrace(int fd, int ops, int facs, pid_t pid); }
    	SYS_PREADV               = 289 // { ssize_t|sys||preadv(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); }
    - Last Modified: Thu Oct 28 18:17:57 UTC 2021
    - 25.7K bytes
  7. src/syscall/zsysnum_netbsd_arm.go

    	SYS_GETSID               = 286 // { pid_t|sys||getsid(pid_t pid); }
    	SYS___CLONE              = 287 // { pid_t|sys||__clone(int flags, void *stack); }
    	SYS_FKTRACE              = 288 // { int|sys||fktrace(int fd, int ops, int facs, pid_t pid); }
    	SYS_PREADV               = 289 // { ssize_t|sys||preadv(int fd, const struct iovec *iovp, int iovcnt, int PAD, off_t offset); }
    - Last Modified: Thu Oct 28 18:17:57 UTC 2021
    - 25.7K bytes
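
    These generated tables give each syscall its kernel number per GOOS/GOARCH (the amd64 and arm tables coincide over this range). A NetBSD-only sketch of invoking one directly by number, as the generated wrappers do internally:

    //go:build netbsd
    
    package main
    
    import (
    	"fmt"
    	"syscall"
    )
    
    func main() {
    	// getsid(2) by raw number; pid 0 means the calling process.
    	sid, _, errno := syscall.Syscall(syscall.SYS_GETSID, 0, 0, 0)
    	if errno != 0 {
    		fmt.Println("getsid:", errno)
    		return
    	}
    	fmt.Println("session id:", sid)
    }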
  8. src/cmd/internal/obj/x86/obj6.go

    		}
    
    		// Mark the stack bound check and morestack call async nonpreemptible.
    		// If we get preempted here, when resumed the preemption request is
    		// cleared, but we'll still call morestack, which will double the stack
    		// unnecessarily. See issue #35470.
    		p = ctxt.StartUnsafePoint(p, newprog)
    	} else if framesize <= abi.StackBig {
    		// large stack: SP-framesize <= stackguard-StackSmall
    - Last Modified: Fri Sep 08 18:36:45 UTC 2023
    - 40.9K bytes
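
    The prologue emitted here is what lets code like the following hypothetical sketch run safely: every non-leaf function checks SP against the stack guard and calls morestack to grow the stack when the frame will not fit, with larger frames taking the SP-framesize <= stackguard-StackSmall comparison shown above:

    package main
    
    import "fmt"
    
    // deep recurses with a deliberately large frame so the prologue's
    // stack-bound check fails repeatedly and morestack grows the stack.
    func deep(n int) int {
    	var pad [4096]byte // large frame: well past the small-frame fast path
    	pad[0] = byte(n)
    	if n == 0 {
    		return int(pad[0])
    	}
    	return deep(n-1) + int(pad[0])
    }
    
    func main() {
    	// Roughly 4 MiB of frames, grown on demand from the initial
    	// small goroutine stack.
    	fmt.Println(deep(1000))
    }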
  9. src/net/http/clientserver_test.go

    		}
    		if gotLog == "" {
    			if d > 0 {
    				t.Logf("wanted a stack trace logged; got nothing after %v", d)
    			}
    			return false
    		}
    		if !strings.Contains(gotLog, "created by ") && strings.Count(gotLog, "\n") < 6 {
    			if d > 0 {
    				t.Logf("output doesn't look like a panic stack trace after %v. Got: %s", d, gotLog)
    			}
    			return false
    		}
    		return true
    	})
    }
    - Last Modified: Thu May 23 01:00:11 UTC 2024
    - 46.6K bytes
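
    What the test is waiting for is net/http's own panic handling: the server goroutine recovers a handler panic and logs a goroutine stack trace (hence the "created by " marker). A standalone sketch that produces such a log line:

    package main
    
    import (
    	"fmt"
    	"net/http"
    	"net/http/httptest"
    )
    
    func main() {
    	// The server recovers the panic and logs "http: panic serving"
    	// followed by a stack trace to its ErrorLog (stderr by default).
    	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		panic("boom")
    	}))
    	defer ts.Close()
    
    	if _, err := http.Get(ts.URL); err != nil {
    		fmt.Println("request failed, as expected after the panic:", err)
    	}
    }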
  10. src/runtime/trace.go

    package runtime
    
    import (
    	"internal/runtime/atomic"
    	"unsafe"
    )
    
    // Trace state.
    
    // trace is global tracing context.
    var trace struct {
    	// trace.lock must only be acquired on the system stack where
    	// stack splits cannot happen while it is held.
    	lock mutex
    
    	// Trace buffer management.
    	//
    	// First we check the empty list for any free buffers. If not, buffers
    - Last Modified: Wed May 22 21:17:41 UTC 2024
    - 37.1K bytes
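
    The struct here is the state behind the public runtime/trace API; a minimal sketch of driving it from user code:

    package main
    
    import (
    	"fmt"
    	"os"
    	"runtime/trace"
    )
    
    func main() {
    	f, err := os.Create("trace.out")
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	defer f.Close()
    
    	// Start initializes the global trace state shown above; Stop
    	// flushes the trace buffers and tears it down.
    	if err := trace.Start(f); err != nil {
    		fmt.Println(err)
    		return
    	}
    	// ... traced workload ...
    	trace.Stop()
    	fmt.Println("wrote trace.out; view with: go tool trace trace.out")
    }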