Results 21 - 30 of 273 for stack (0.19 sec)

  1. src/runtime/mwbbuf.go

    	// the buffer if the stack has been shaded, or even avoid
    	// putting them in the buffer at all (which would double its
    	// capacity). This is slightly complicated with the buffer; we
    	// could track whether any un-shaded goroutine has used the
    	// buffer, or just track globally whether there are any
    	// un-shaded stacks and flush after each stack scan.
    	gcw := &pp.gcw
    	pos := 0
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 8.1K bytes
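
The comment in this excerpt is weighing when the per-P write barrier buffer can skip or defer work. As a rough illustration of the underlying data structure, here is a minimal sketch of a fixed-size pointer buffer that flushes when full; wbBuf, put, and flush are illustrative names, not the runtime's real API:

    package main

    import "fmt"

    // wbBuf is a toy stand-in for the runtime's per-P write barrier
    // buffer: pointers accumulate in a fixed-size buffer and are handed
    // off ("flushed") to GC work when the buffer fills.
    type wbBuf struct {
    	buf  []uintptr
    	next int
    }

    func (b *wbBuf) put(p uintptr) {
    	b.buf[b.next] = p
    	b.next++
    	if b.next == len(b.buf) {
    		b.flush()
    	}
    }

    func (b *wbBuf) flush() {
    	// In the real runtime this would shade each pointer grey and
    	// enqueue it on the current P's gcWork; here we just reset.
    	fmt.Printf("flushing %d pointers to the GC work queue\n", b.next)
    	b.next = 0
    }

    func main() {
    	b := &wbBuf{buf: make([]uintptr, 4)}
    	for i := uintptr(1); i <= 10; i++ {
    		b.put(i)
    	}
    	b.flush() // final flush, e.g. at stack scan time
    }
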
  2. src/runtime/mklockrank.go

    profMemActive < profMemFuture;
    
    # Stack allocation and copying
    gcBitsArenas,
      netpollInit,
      profBlock,
      profInsert,
      profMemFuture,
      spanSetSpine,
      fin,
      root
    # Anything that can grow the stack can acquire STACKGROW.
    # (Most higher layers imply STACKGROW, like MALLOC.)
    < STACKGROW
    # Below STACKGROW is the stack allocator/copying implementation.
    < gscan;
    gscan < stackpool;
    - Last Modified: Wed May 08 17:47:01 UTC 2024
    - 9.1K bytes
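
The rank declarations in this excerpt define a partial order on runtime locks (STACKGROW < gscan < stackpool). Below is a toy sketch of how such an order can be enforced at acquisition time, assuming a simple integer rank per lock; the checker is an illustration, not the generated runtime code:

    package main

    import "fmt"

    // rank mirrors the three ranks named in the snippet above.
    type rank int

    const (
    	rankStackGrow rank = iota
    	rankGscan
    	rankStackpool
    )

    // acquire panics if a lock of rank r is taken while already holding
    // a lock of equal or higher rank, enforcing the declared order.
    func acquire(held []rank, r rank) []rank {
    	for _, h := range held {
    		if h >= r {
    			panic(fmt.Sprintf("lock ordering violation: %d acquired after %d", r, h))
    		}
    	}
    	return append(held, r)
    }

    func main() {
    	var held []rank
    	held = acquire(held, rankStackGrow)
    	held = acquire(held, rankGscan)
    	held = acquire(held, rankStackpool) // legal: strictly increasing ranks
    	fmt.Println("acquired in rank order:", held)
    }
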
  3. src/cmd/compile/internal/s390x/ggen.go

    // zerorange clears the stack in the given range.
    func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
    	if cnt == 0 {
    		return p
    	}
    
    	// Adjust the frame to account for LR.
    	off += base.Ctxt.Arch.FixedFrameSize
    	reg := int16(s390x.REGSP)
    
    	// If the off cannot fit in a 12-bit unsigned displacement then we
    	// need to create a copy of the stack pointer that we can adjust.
    - Last Modified: Tue Apr 19 15:59:22 UTC 2022
    - 2.5K bytes
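
The trailing comment notes that offsets beyond a 12-bit unsigned displacement (0-4095) force the compiler to materialize a copy of the stack pointer. A small sketch of just that range check, with fitsIn12BitDisp as a hypothetical helper:

    package main

    import "fmt"

    // fitsIn12BitDisp reports whether an offset can be encoded directly
    // as a 12-bit unsigned displacement on s390x (0..4095).
    func fitsIn12BitDisp(off int64) bool {
    	return off >= 0 && off < 4096
    }

    func main() {
    	for _, off := range []int64{0, 4095, 4096, 8192} {
    		// Offsets that don't fit would need an adjusted copy of REGSP.
    		fmt.Printf("off=%d fits=%v\n", off, fitsIn12BitDisp(off))
    	}
    }
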
  4. src/net/rawconn_unix_test.go

    		if operr != nil {
    			return
    		}
    		switch addr := addr.(type) {
    		case *TCPAddr:
    			// There's no guarantee that IP-level socket
    			// options work well with dual stack sockets.
    			// A simple solution would be to take a look
    			// at the bound address to the raw connection
    			// and to classify the address family of the
    			// underlying socket by the bound address:
    			//
    - Last Modified: Tue Mar 29 16:24:51 UTC 2022
    - 3K bytes
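
The comment in this excerpt sketches classifying the underlying socket's address family from the bound address. A minimal standalone version of that idea, assuming a hypothetical addrFamily helper over net.TCPAddr:

    package main

    import (
    	"fmt"
    	"net"
    )

    // addrFamily classifies a bound TCP address as IPv4 or IPv6 by
    // checking whether the IP has a 4-byte representation.
    func addrFamily(a *net.TCPAddr) string {
    	if a.IP.To4() != nil {
    		return "ipv4"
    	}
    	return "ipv6"
    }

    func main() {
    	fmt.Println(addrFamily(&net.TCPAddr{IP: net.ParseIP("127.0.0.1")})) // ipv4
    	fmt.Println(addrFamily(&net.TCPAddr{IP: net.ParseIP("::1")}))       // ipv6
    }
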
  5. src/internal/trace/traceviewer/pprof.go

    			return
    		}
    		defer os.Remove(svgFilename)
    		w.Header().Set("Content-Type", "image/svg+xml")
    		http.ServeFile(w, r, svgFilename)
    	}
    }
    
    type ProfileRecord struct {
    	Stack []*trace.Frame
    	Count uint64
    	Time  time.Duration
    }
    
    func BuildProfile(prof []ProfileRecord) *profile.Profile {
    	p := &profile.Profile{
    		PeriodType: &profile.ValueType{Type: "trace", Unit: "count"},
    - Last Modified: Tue Nov 21 21:28:02 UTC 2023
    - 4K bytes
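
BuildProfile turns a flat list of ProfileRecords into a pprof profile. One natural preprocessing step is aggregating records that share a stack; the sketch below shows that aggregation with a string stack key for simplicity (the real ProfileRecord keys by []*trace.Frame, and this helper is purely illustrative):

    package main

    import (
    	"fmt"
    	"time"
    )

    // record mirrors the shape of traceviewer's ProfileRecord, with the
    // stack flattened to a string key for this sketch.
    type record struct {
    	stack string
    	count uint64
    	time  time.Duration
    }

    // aggregate merges records with identical stacks, summing counts
    // and durations.
    func aggregate(recs []record) map[string]record {
    	out := map[string]record{}
    	for _, r := range recs {
    		agg := out[r.stack]
    		agg.stack = r.stack
    		agg.count += r.count
    		agg.time += r.time
    		out[r.stack] = agg
    	}
    	return out
    }

    func main() {
    	recs := []record{
    		{"main;f", 1, 10 * time.Millisecond},
    		{"main;f", 2, 5 * time.Millisecond},
    		{"main;g", 1, 7 * time.Millisecond},
    	}
    	for k, v := range aggregate(recs) {
    		fmt.Println(k, v.count, v.time)
    	}
    }
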
  6. src/regexp/backtrack.go

    //
    // backtrack is a fast replacement for the NFA code on small
    // regexps when onepass cannot be used.
    
    package regexp
    
    import (
    	"regexp/syntax"
    	"sync"
    )
    
    // A job is an entry on the backtracker's job stack. It holds
    // the instruction pc and the position in the input.
    type job struct {
    	pc  uint32
    	arg bool
    	pos int
    }
    
    const (
    	visitedBits        = 32
    	maxBacktrackProg   = 500        // len(prog.Inst) <= max
    - Last Modified: Thu Sep 14 17:25:39 UTC 2023
    - 8.8K bytes
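
The job type in this excerpt is an entry on an explicit backtracking stack: an instruction pc plus an input position. Below is a toy matcher for the single pattern a*b, showing the same push/pop discipline without any of the regexp machinery; matchAStarB is purely illustrative:

    package main

    import "fmt"

    // job holds which step to try next and the current input position,
    // echoing the (pc, pos) pairs on the real backtracker's stack.
    type job struct {
    	pc  int // 0 = try consuming an 'a', 1 = try matching the final 'b'
    	pos int // position in the input
    }

    func matchAStarB(s string) bool {
    	stack := []job{{0, 0}}
    	for len(stack) > 0 {
    		j := stack[len(stack)-1]
    		stack = stack[:len(stack)-1]
    		switch j.pc {
    		case 0: // a*: greedily consume an 'a', but record a backtrack point
    			stack = append(stack, job{1, j.pos})
    			if j.pos < len(s) && s[j.pos] == 'a' {
    				stack = append(stack, job{0, j.pos + 1})
    			}
    		case 1: // the trailing 'b' must be the last byte
    			if j.pos == len(s)-1 && s[j.pos] == 'b' {
    				return true
    			}
    		}
    	}
    	return false // job stack exhausted: no match
    }

    func main() {
    	fmt.Println(matchAStarB("aaab"), matchAStarB("aac")) // true false
    }
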
  7. src/runtime/nonwindows_stub.go

    func enableWER() {}
    
    // winlibcall is not implemented on non-Windows systems,
    // but it is used in non-OS-specific parts of the runtime.
    // Define it as an empty struct to avoid wasting stack space.
    - Last Modified: Tue Mar 26 03:12:13 UTC 2024
    - 962 bytes
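
This stub file illustrates a standard Go pattern: OS-specific hooks get empty definitions under a build constraint so common code can call them unconditionally. A minimal sketch of the same pattern, mirroring the names in the snippet (the package name is illustrative):

    //go:build !windows

    package mypkg

    // enableWER is a no-op everywhere except Windows, where the real
    // implementation would turn on Windows Error Reporting.
    func enableWER() {}

    // winlibcall carries no data off Windows; an empty struct occupies
    // zero bytes, so values of it waste no stack space in common code.
    type winlibcall struct{}
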
  8. test/fixedbugs/issue40954.go

    	// we stored an integer in that pointer. That integer just happens
    	// to be the address of i.
    	// v is also the address of i.
    	// p has a base type which is marked not-in-heap, so it
    	// should not be adjusted when the stack is copied.
    	recurse(100, p, v)
    }
    func recurse(n int, p *S, v uintptr) {
    	if n > 0 {
    		recurse(n-1, p, v)
    	}
    	if uintptr(unsafe.Pointer(p)) != v {
    		panic("adjusted notinheap pointer")
    	}
    - Last Modified: Fri Sep 02 15:27:18 UTC 2022
    - 874 bytes
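
The test relies on not-in-heap pointers being left alone when the stack is copied. For contrast, ordinary pointers into the stack are adjusted during copying, which the following standalone sketch demonstrates by forcing stack growth with deep recursion:

    package main

    import "fmt"

    // recurse grows the stack; each growth copies the stack and adjusts
    // p so that it still points at the original variable.
    func recurse(n int, p *int) {
    	if n > 0 {
    		recurse(n-1, p)
    		return
    	}
    	*p = 0 // deepest frame writes through the (adjusted) pointer
    }

    func main() {
    	i := 42
    	recurse(100000, &i)
    	fmt.Println(i) // 0: the write landed on i even after the stack moved
    }
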
  9. test/abi/bad_internal_offsets.go

    	F1 string
    	F2 StructF0S1
    }
    
    type StructF0S1 struct {
    	_ uint16
    }
    
    // 0 returns 3 params
    //go:registerparams
    //go:noinline
    func Test0(p0 uint32, p1 StructF0S0, p2 int32) {
    	// consume some stack space, so as to trigger morestack
    	var pad [256]uint64
    	pad[FailCount]++
    	if p0 == 0 {
    		return
    	}
    	p1f0c := int16(-3096)
    	if p1.F0 != p1f0c {
    		NoteFailureElem(0, "genChecker0", "parm", 1, 0, pad[0])
    		return
    - Last Modified: Thu Oct 19 23:33:25 UTC 2023
    - 1.4K bytes
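
The pad array in this excerpt burns stack space so the call chain crosses a stack boundary and morestack runs while register-passed parameters are live. A stripped-down sketch of the same trick, with consume as a hypothetical stand-in for the generated checker:

    package main

    import "fmt"

    // consume recurses with a large local array so each frame occupies
    // ~2KB of stack, forcing stack growth partway down the chain.
    //
    //go:noinline
    func consume(depth int) int {
    	var pad [256]uint64 // ~2KB of stack per frame
    	pad[0] = uint64(depth)
    	if depth == 0 {
    		return int(pad[0])
    	}
    	return consume(depth-1) + int(pad[0])
    }

    func main() {
    	fmt.Println(consume(64)) // deep enough to trigger morestack
    }
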
  10. src/cmd/compile/internal/test/zerorange_test.go

    // ensure that output param is allocated on the heap. Also, since there is a
    // defer, the pointer to each output param must be zeroed in the prologue (see
    // plive.go:epilogue()). So, we will get a block of one or more stack slots that
    // need to be zeroed. Hence, we are testing compilation completes successfully when
    // zerorange calls of various sizes (8-136 bytes) are generated. We are not
    - Last Modified: Fri Apr 01 18:18:07 UTC 2022
    - 4.1K bytes
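
The comment explains that a named output parameter plus a defer forces the result's stack slot to be zeroed in the prologue, which is what generates zerorange calls. A minimal sketch of source code with that shape; work is a hypothetical example, not taken from the test:

    package main

    import "fmt"

    // work has a named pointer result and a defer, so the result slot
    // must be zeroed before the body runs (the deferred closure could
    // otherwise observe garbage through it).
    func work() (result *int, err error) {
    	defer func() {
    		if err != nil {
    			result = nil
    		}
    	}()
    	v := 7
    	return &v, nil
    }

    func main() {
    	r, err := work()
    	fmt.Println(*r, err) // 7 <nil>
    }
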