Results 1 - 10 of 1,068 for spill (0.05 sec)

  1. src/reflect/abi.go

    				stackPtrs.append(1)
    			} else {
    				stackPtrs.append(0)
    			}
    		} else {
    			spill += goarch.PtrSize
    		}
    	}
    	for i, arg := range t.InSlice() {
    		stkStep := in.addArg(arg)
    		if stkStep != nil {
    			addTypeBits(stackPtrs, stkStep.stkOff, arg)
    		} else {
    			spill = align(spill, uintptr(arg.Align()))
    			spill += arg.Size()
    			for _, st := range in.stepsForValue(i) {
    				if st.kind == abiStepPointer {
    - Last Modified: Tue May 07 17:08:32 UTC 2024
    - 15K bytes
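
    A note on the pattern above: before reserving spill space for a register-assigned
    argument, the ABI code rounds the running offset up to the argument's alignment,
    then adds the argument's size. A minimal, self-contained sketch of that arithmetic
    with hypothetical sizes and alignments (this align is the usual round-up helper,
    not the reflect-internal one):

    package main

    import "fmt"

    // align rounds x up to a multiple of n, which must be a power of two.
    func align(x, n uintptr) uintptr {
    	return (x + n - 1) &^ (n - 1)
    }

    func main() {
    	// Hypothetical argument layouts: int32, int64, byte.
    	type arg struct{ size, alignment uintptr }
    	args := []arg{{4, 4}, {8, 8}, {1, 1}}

    	var spill uintptr
    	for _, a := range args {
    		spill = align(spill, a.alignment) // pad to this argument's alignment
    		fmt.Printf("size %d spills at offset %d\n", a.size, spill)
    		spill += a.size
    	}
    	fmt.Println("total spill space:", spill) // 17
    }
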
  2. src/internal/trace/generation.go

    		evTable: &evTable{
    			pcs: make(map[uint64]frame),
    		},
    		batches: make(map[ThreadID][]batch),
    	}
    	// Process the spilled batch.
    	if spill != nil {
    		g.gen = spill.gen
    		if err := processBatch(g, *spill.batch); err != nil {
    			return nil, nil, err
    		}
    		spill = nil
    	}
    	// Read batches one at a time until we either hit EOF or
    	// the next generation.
    	var spillErr error
    	for {
    		b, gen, err := readBatch(r)
    - Last Modified: Thu May 30 22:14:45 UTC 2024
    - 12.1K bytes
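
    The spill above is the hand-off the comments describe: the first batch of the
    next generation has necessarily already been read by the time the boundary is
    detected, so it is carried into the next parse. A self-contained sketch of that
    shape with simplified stand-in types (batch and readGeneration here are
    illustrative, not the real internal/trace API):

    package main

    import "fmt"

    // batch is a simplified stand-in for the trace reader's batch type.
    type batch struct {
    	gen  uint64
    	data string
    }

    // readGeneration consumes batches for one generation. The first batch of
    // the next generation is only discovered after reading it, so it is
    // returned as a spill for the following call.
    func readGeneration(batches []batch, spill *batch) (evs []string, next *batch, rest []batch) {
    	var gen uint64
    	if spill != nil {
    		gen = spill.gen
    		evs = append(evs, spill.data)
    	}
    	for len(batches) > 0 {
    		b := batches[0]
    		batches = batches[1:]
    		if evs == nil {
    			gen = b.gen
    		} else if b.gen != gen {
    			return evs, &b, batches // b belongs to the next generation
    		}
    		evs = append(evs, b.data)
    	}
    	return evs, nil, nil
    }

    func main() {
    	rest := []batch{{1, "a"}, {1, "b"}, {2, "c"}, {2, "d"}}
    	var spill *batch
    	for spill != nil || len(rest) > 0 {
    		var evs []string
    		evs, spill, rest = readGeneration(rest, spill)
    		fmt.Println(evs) // [a b], then [c d]
    	}
    }
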
  3. src/cmd/internal/obj/link.go

    }
    
    // SpillRegisterArgs emits the code to spill register args into whatever
    // locations the spill records specify.
    func (fi *FuncInfo) SpillRegisterArgs(last *Prog, pa ProgAlloc) *Prog {
    	// Spill register args.
    	for _, ra := range fi.spills {
    		spill := Appendp(last, pa)
    		spill.As = ra.Spill
    		spill.From.Type = TYPE_REG
    		spill.From.Reg = ra.Reg
    		spill.To = ra.Addr
    		last = spill
    	}
    	return last
    - Last Modified: Wed May 15 19:57:43 UTC 2024
    - 33.1K bytes
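
    The loop is a straight emit: one store per spill record, appended after last,
    and its counterpart UnspillRegisterArgs in the same file emits the inverse
    loads. A toy model of that pair (regSpill and the string "instructions" are
    illustrative stand-ins for the real Prog machinery):

    package main

    import "fmt"

    // regSpill pairs a register with its spill address and opcodes.
    type regSpill struct {
    	reg, addr       string
    	spillOp, loadOp string
    }

    // spillRegisterArgs appends one store per record, mirroring how the real
    // SpillRegisterArgs appends a Prog per spill record.
    func spillRegisterArgs(prog []string, spills []regSpill) []string {
    	for _, ra := range spills {
    		prog = append(prog, fmt.Sprintf("%s %s, %s", ra.spillOp, ra.reg, ra.addr))
    	}
    	return prog
    }

    // unspillRegisterArgs emits the inverse loads.
    func unspillRegisterArgs(prog []string, spills []regSpill) []string {
    	for _, ra := range spills {
    		prog = append(prog, fmt.Sprintf("%s %s, %s", ra.loadOp, ra.addr, ra.reg))
    	}
    	return prog
    }

    func main() {
    	spills := []regSpill{
    		{"AX", "8(SP)", "MOVQ", "MOVQ"},
    		{"BX", "16(SP)", "MOVQ", "MOVQ"},
    	}
    	for _, p := range unspillRegisterArgs(spillRegisterArgs(nil, spills), spills) {
    		fmt.Println(p)
    	}
    }
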
  4. src/cmd/compile/internal/ssa/stackalloc.go

    				t.addAll(spillLive[p.ID])
    				for _, v := range phis {
    					a := v.Args[i]
    					if s.values[a.ID].needSlot {
    						t.add(a.ID)
    					}
    					if spill := s.values[a.ID].spill; spill != nil {
    						//TODO: remove?  Subsumed by SpillUse?
    						t.add(spill.ID)
    					}
    				}
    				if t.size() == len(s.live[p.ID]) {
    					continue
    				}
    				// grow p's live set
    - Last Modified: Thu Feb 29 21:29:41 UTC 2024
    - 12.6K bytes
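
    Context for the size check above: liveness here is a backwards fixed point, and
    comparing t.size() against the recorded live set is the convergence test; when t
    is larger, p's live set grows and the analysis runs again. A stripped-down sketch
    of that loop shape, without the spill and phi handling (the CFG and value IDs are
    hypothetical):

    package main

    import "fmt"

    func main() {
    	// live-in(b) = (union of successors' live-in) + use(b) - def(b)
    	succs := map[string][]string{"b0": {"b1"}, "b1": {"b2"}, "b2": {}}
    	use := map[string][]int{"b0": {}, "b1": {1}, "b2": {2}}
    	def := map[string][]int{"b0": {1, 2}, "b1": {}, "b2": {}}

    	live := map[string]map[int]bool{}
    	for b := range succs {
    		live[b] = map[int]bool{}
    	}

    	for changed := true; changed; {
    		changed = false
    		for b, ss := range succs {
    			t := map[int]bool{}
    			for _, s := range ss { // union of successors' live sets
    				for v := range live[s] {
    					t[v] = true
    				}
    			}
    			for _, v := range use[b] {
    				t[v] = true
    			}
    			for _, v := range def[b] {
    				delete(t, v)
    			}
    			if len(t) != len(live[b]) { // grow b's live set
    				live[b] = t
    				changed = true
    			}
    		}
    	}
    	fmt.Println(live) // map[b0:map[] b1:map[1:true 2:true] b2:map[2:true]]
    }
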
  5. src/runtime/syscall_windows.go

    	// is already pointing to the right place for smaller
    	// arguments. The same is true on arm.
    
    	oldParts := p.parts
    	if p.tryRegAssignArg(t, 0) {
    		// Account for spill space.
    		//
    		// TODO(mknyszek): Remove this when we no longer have
    		// caller reserved spill space.
    		p.dstSpill = alignUp(p.dstSpill, uintptr(t.Align_))
    		p.dstSpill += t.Size_
    	} else {
    		// Register assignment failed.
    		// Undo the work and stack assign.
    - Last Modified: Wed May 22 20:12:46 UTC 2024
    - 16.6K bytes
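
    Two things happen above: even a successful register assignment still accounts
    for the caller-reserved spill space, and a failed one restores oldParts and
    falls back to the stack. A sketch of that try-then-undo shape with toy types
    (assigner and its fields are illustrative):

    package main

    import "fmt"

    // assigner is a toy stand-in for the ABI assignment state.
    type assigner struct {
    	regsUsed, regLimit int
    	stackOff, dstSpill uintptr
    }

    func alignUp(x, n uintptr) uintptr { return (x + n - 1) &^ (n - 1) }

    // tryRegAssign consumes one register per word and may fail partway
    // through, which is why the caller snapshots state first.
    func (a *assigner) tryRegAssign(words int) bool {
    	for i := 0; i < words; i++ {
    		if a.regsUsed == a.regLimit {
    			return false
    		}
    		a.regsUsed++
    	}
    	return true
    }

    func (a *assigner) assign(size, align uintptr, words int) {
    	old := *a // like saving oldParts above
    	if a.tryRegAssign(words) {
    		// Register-assigned, but still account for caller spill space.
    		a.dstSpill = alignUp(a.dstSpill, align)
    		a.dstSpill += size
    		return
    	}
    	*a = old // register assignment failed: undo the work and stack-assign
    	a.stackOff = alignUp(a.stackOff, align)
    	a.stackOff += size
    }

    func main() {
    	a := &assigner{regLimit: 2}
    	a.assign(8, 8, 1)  // fits in registers
    	a.assign(16, 8, 2) // second word doesn't fit: undone, stack-assigned
    	fmt.Printf("%+v\n", *a)
    }
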
  6. src/runtime/traceback_test.go

    }
    
    // norace to avoid race instrumentation changing spill locations.
    // nosplit to avoid preemption or morestack spilling registers.
    //
    //go:norace
    //go:nosplit
    //go:noinline
    func testTracebackArgs11b(a, b, c, d int32) int {
    	var x int32
    	if a < 0 {
    		print() // spill b in a conditional
    		x = b
    	} else {
    		print() // spill c in a conditional
    		x = c
    	}
    	if d < 0 { // d is always needed
    - Last Modified: Thu Dec 14 17:22:18 UTC 2023
    - 22.9K bytes
  7. src/cmd/compile/internal/liveness/plive.go

    		//     a. X is in a register -- then X is seen, and the spill slot is also scanned conservatively.
    		//     b. X is spilled -- the spill slot is initialized, and scanned conservatively
    		//     c. X is not live -- the spill slot is scanned conservatively, and it may contain X from an earlier spill.
    		//  4. GC within G, transitively called from F
    		//    a. X is live at call site, therefore is spilled, to its spill slot (which is live because of subsequent LoadReg).
    - Last Modified: Fri Jun 07 15:22:22 UTC 2024
    - 45.2K bytes
  8. src/runtime/preempt.go

    		// This may be a problem if we start using more
    		// registers. In that case, we should store registers
    		// in a context object. If we pre-allocate one per P,
    		// asyncPreempt can spill just a few registers to the
    		// stack, then grab its context object and spill into
    		// it. When it enters the runtime, it would allocate a
    		// new context for the P.
    		print("runtime: asyncPreemptStack=", asyncPreemptStack, "\n")
    - Last Modified: Fri May 17 15:41:45 UTC 2024
    - 15.1K bytes
  9. src/runtime/sys_freebsd_amd64.s

    	// Transition from C ABI to Go ABI.
    	PUSH_REGS_HOST_TO_ABI0()
    
    	// Set up ABIInternal environment: g in R14, cleared X15.
    	get_tls(R12)
    	MOVQ	g(R12), R14
    	PXOR	X15, X15
    
    	// Reserve space for spill slots.
    	NOP	SP		// disable vet stack checking
    	ADJSP   $24
    
    	// Call into the Go signal handler
    	MOVQ	DI, AX	// sig
    	MOVQ	SI, BX	// info
    	MOVQ	DX, CX	// ctx
    	CALL	·sigtrampgo<ABIInternal>(SB)
    
    - Last Modified: Tue Jun 06 18:49:01 UTC 2023
    - 12.7K bytes
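
    The ADJSP $24 above plausibly reserves one 8-byte spill slot for each of the
    three register arguments moved into AX, BX, and CX before the call; a sketch
    of that arithmetic, assuming one pointer-sized slot per argument:

    package main

    import "fmt"

    func main() {
    	const ptrSize = 8                      // bytes per slot on amd64
    	args := []string{"sig", "info", "ctx"} // passed in AX, BX, CX above
    	fmt.Println("spill bytes:", len(args)*ptrSize) // 24, matching ADJSP $24
    }
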
  10. src/runtime/mfinal.go

    			for i := fb.cnt; i > 0; i-- {
    				f := &fb.fin[i-1]
    
    				var regs abi.RegArgs
    				// The args may be passed in registers or on stack. Even for
    				// the register case, we still need the spill slots.
    				// TODO: revisit if we remove spill slots.
    				//
    				// Unfortunately because we can have an arbitrary
    				// amount of returns and it would be complex to try and
    				// figure out how many of those can get passed in registers,
    - Last Modified: Fri Jun 07 01:56:56 UTC 2024
    - 19K bytes