Results 61 - 70 of 257 for stack (0.06 sec)

  1. src/runtime/os_linux.go

    // Version of newosproc that doesn't require a valid G.
    //
    //go:nosplit
    func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
    	stack := sysAlloc(stacksize, &memstats.stacks_sys)
    	if stack == nil {
    		writeErrStr(failallocatestack)
    		exit(1)
    	}
    	ret := clone(cloneFlags, unsafe.Pointer(uintptr(stack)+stacksize), nil, nil, fn)
    	if ret < 0 {
    		writeErrStr(failthreadcreate)
    		exit(1)
    	}
    }
    
    const (
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 25.9K bytes
    - Viewed (0)
  2. src/cmd/compile/internal/ir/func.go

    	if base.Debug.Closure > 0 {
    		if clo.Esc() == EscHeap {
    			base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func.ClosureVars)
    		} else {
    			base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars)
    		}
    	}
    	if base.Flag.CompilingRuntime && clo.Esc() == EscHeap && !clo.IsGoWrap {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 01:05:44 UTC 2024
    - 21.1K bytes
    - Viewed (0)
  3. src/compress/gzip/gunzip_test.go

    	z, err := NewReader(r)
    	if err != nil {
    		t.Fatalf("NewReader: got %v, want nil", err)
    	}
    	// Prior to CVE-2022-30631 fix, this would cause an unrecoverable panic due
    	// to stack exhaustion.
    	_, err = z.Read(make([]byte, 10))
    	if err != io.EOF {
    		t.Errorf("Reader.Read: got %v, want %v", err, io.EOF)
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Jul 12 15:06:07 UTC 2022
    - 19.5K bytes
    - Viewed (0)
  4. src/compress/flate/deflate.go

    		if d.hashOffset > maxHashOffset {
    			delta := d.hashOffset - 1
    			d.hashOffset -= delta
    			d.chainHead -= delta
    
    			// Iterate over slices instead of arrays to avoid copying
    			// the entire table onto the stack (Issue #18625).
    			for i, v := range d.hashPrev[:] {
    				if int(v) > delta {
    					d.hashPrev[i] = uint32(int(v) - delta)
    				} else {
    					d.hashPrev[i] = 0
    				}
    			}
    			for i, v := range d.hashHead[:] {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Apr 26 13:32:40 UTC 2024
    - 20.3K bytes
    - Viewed (0)
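    The comment in this snippet points at a real Go subtlety: with two iteration variables, range over an array value copies the whole array, whereas range over a slice of it does not. A minimal standalone sketch of the difference (the table size and names below are illustrative, not taken from deflate.go):

    package main

    import "fmt"

    var table [1 << 15]uint32 // stand-in for a large hash table such as d.hashPrev

    func rebaseArray(delta uint32) {
    	// With two iteration variables, `range table` evaluates the array,
    	// so the entire 128 KiB table is copied, potentially onto the stack.
    	for i, v := range table {
    		if v > delta {
    			table[i] = v - delta
    		} else {
    			table[i] = 0
    		}
    	}
    }

    func rebaseSlice(delta uint32) {
    	// `range table[:]` iterates over a slice header referring to the
    	// original backing array, so nothing is copied; this is the form
    	// deflate.go uses to avoid the copy (Issue #18625).
    	for i, v := range table[:] {
    		if v > delta {
    			table[i] = v - delta
    		} else {
    			table[i] = 0
    		}
    	}
    }

    func main() {
    	table[0], table[1] = 10, 2
    	rebaseArray(3) // works, but copies the table
    	rebaseSlice(3) // same result, no copy
    	fmt.Println(table[0], table[1]) // 4 0
    }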
  5. src/runtime/metrics.go

    		},
    		"/gc/pauses:seconds": {
    			compute: func(_ *statAggregate, out *metricValue) {
    				// N.B. this is identical to /sched/pauses/total/gc:seconds.
    				sched.stwTotalTimeGC.write(out)
    			},
    		},
    		"/gc/stack/starting-size:bytes": {
    			compute: func(in *statAggregate, out *metricValue) {
    				out.kind = metricKindUint64
    				out.scalar = uint64(startingStackSize)
    			},
    		},
    		"/memory/classes/heap/free:bytes": {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Apr 08 21:03:13 UTC 2024
    - 26K bytes
    - Viewed (0)
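    The compute closures shown here back the public runtime/metrics package, so the stack metric in the snippet can be read from ordinary user code. A minimal consumer-side sketch (only the metric name is taken from the snippet above):

    package main

    import (
    	"fmt"
    	"runtime/metrics"
    )

    func main() {
    	// Ask the runtime for the metric registered in metrics.go above.
    	samples := []metrics.Sample{{Name: "/gc/stack/starting-size:bytes"}}
    	metrics.Read(samples)

    	if samples[0].Value.Kind() == metrics.KindUint64 {
    		fmt.Println("starting goroutine stack size:", samples[0].Value.Uint64(), "bytes")
    	}
    }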
  6. src/cmd/internal/obj/x86/obj6.go

    		}
    
    		// Mark the stack bound check and morestack call async nonpreemptible.
    		// If we get preempted here, when resumed the preemption request is
    		// cleared, but we'll still call morestack, which will double the stack
    		// unnecessarily. See issue #35470.
    		p = ctxt.StartUnsafePoint(p, newprog)
    	} else if framesize <= abi.StackBig {
    		// large stack: SP-framesize <= stackguard-StackSmall
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Sep 08 18:36:45 UTC 2023
    - 40.9K bytes
    - Viewed (0)
  7. src/net/http/clientserver_test.go

    		}
    		if gotLog == "" {
    			if d > 0 {
    				t.Logf("wanted a stack trace logged; got nothing after %v", d)
    			}
    			return false
    		}
    		if !strings.Contains(gotLog, "created by ") && strings.Count(gotLog, "\n") < 6 {
    			if d > 0 {
    				t.Logf("output doesn't look like a panic stack trace after %v. Got: %s", d, gotLog)
    			}
    			return false
    		}
    		return true
    	})
    }
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 01:00:11 UTC 2024
    - 46.6K bytes
    - Viewed (0)
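    The heuristic in this test, looking for a "created by " frame plus a minimum line count, relies on the shape of Go goroutine tracebacks. A small sketch, unrelated to net/http, showing where that marker comes from:

    package main

    import (
    	"fmt"
    	"runtime/debug"
    	"strings"
    )

    func main() {
    	done := make(chan struct{})
    	go func() {
    		defer close(done)
    		// Tracebacks of goroutines started with `go` include a
    		// "created by ..." frame, which is the marker the test greps for.
    		trace := string(debug.Stack())
    		fmt.Println("contains \"created by\":", strings.Contains(trace, "created by "))
    	}()
    	<-done
    }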
  8. src/runtime/trace.go

    package runtime
    
    import (
    	"internal/runtime/atomic"
    	"unsafe"
    )
    
    // Trace state.
    
    // trace is global tracing context.
    var trace struct {
    	// trace.lock must only be acquired on the system stack where
    	// stack splits cannot happen while it is held.
    	lock mutex
    
    	// Trace buffer management.
    	//
    	// First we check the empty list for any free buffers. If not, buffers
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 21:17:41 UTC 2024
    - 37.1K bytes
    - Viewed (0)
  9. src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go

    	}
    
    	// Common individual register masks
    	var (
    		gp         = buildReg("R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31") // R1 is LR, R2 is thread pointer, R3 is stack pointer, R22 is g, R30 is REGTMP
    		gpg        = gp | buildReg("g")
    		gpsp       = gp | buildReg("SP")
    		gpspg      = gpg | buildReg("SP")
    		gpspsbg    = gpspg | buildReg("SB")
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Nov 21 19:04:19 UTC 2023
    - 25.2K bytes
    - Viewed (0)
  10. src/runtime/syscall_windows_test.go

    	}
    	if !strings.Contains(o, "Exception 0xbad") {
    		t.Fatalf("No stack trace: %v", o)
    	}
    }
    
    func TestZeroDivisionException(t *testing.T) {
    	o := runTestProg(t, "testprog", "ZeroDivisionException")
    	if !strings.Contains(o, "panic: runtime error: integer divide by zero") {
    		t.Fatalf("No stack trace: %v", o)
    	}
    }
    
    func TestWERDialogue(t *testing.T) {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Aug 31 16:31:35 UTC 2023
    - 32.5K bytes
    - Viewed (0)