Results 1 - 3 of 3 for movxt (0.15 sec)

  1. src/cmd/internal/obj/ppc64/obj9.go

    			q.As = AADD
    			q.From.Type = obj.TYPE_CONST
    			q.From.Offset = p.From.Offset
    			p.From.Offset = 0
    			q.To = p.To
    		}
    		return
    
    	}
    
    	// MOVx sym, Ry becomes MOVD symtoc, REGTMP; MOVx (REGTMP), Ry
    	// MOVx Ry, sym becomes MOVD symtoc, REGTMP; MOVx Ry, (REGTMP)
    	// An addition may be inserted between the two MOVs if there is an offset.
    
    	q := obj.Appendp(p, c.newprog)
    	q.As = AMOVD
    	q.From.Type = obj.TYPE_MEM

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 18:17:17 UTC 2024
    - 40.8K bytes
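
    The comment in this excerpt describes how the ppc64 assembler rewrites a MOVx whose operand is a symbol that must be reached through the TOC: the symbol's TOC entry is first loaded into REGTMP, an ADD is inserted if the operand carries a non-zero offset, and the original MOVx is then performed through (REGTMP). Below is a minimal sketch of that split using a toy instruction type; the real code manipulates *obj.Prog values inside cmd/internal/obj/ppc64, and the names here are illustrative only.

        package main

        import "fmt"

        // inst is a toy stand-in for an instruction; the real rewrite works on
        // *obj.Prog values, not on this type.
        type inst struct {
        	op, from, to string
        }

        // rewriteViaTOC splits "MOVx sym+off, Ry" into
        // "MOVD symtoc, REGTMP; [ADD $off, REGTMP;] MOVx (REGTMP), Ry",
        // mirroring the transformation described by the obj9.go comment above.
        func rewriteViaTOC(p inst, off int64) []inst {
        	seq := []inst{{op: "MOVD", from: "symtoc", to: "REGTMP"}}
        	if off != 0 {
        		// The optional addition the comment mentions, inserted between
        		// the two MOVs when the operand has a non-zero offset.
        		seq = append(seq, inst{op: "ADD", from: fmt.Sprintf("$%d", off), to: "REGTMP"})
        	}
        	seq = append(seq, inst{op: p.op, from: "(REGTMP)", to: p.to})
        	return seq
        }

        func main() {
        	for _, q := range rewriteViaTOC(inst{op: "MOVW", from: "sym", to: "R5"}, 8) {
        		fmt.Printf("%s\t%s, %s\n", q.op, q.from, q.to)
        	}
        }
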
  2. src/cmd/internal/obj/arm64/asm7.go

    	/* reloc ops */
    	case 64: /* movT R,addr -> adrp + movT R, (REGTMP) */
    		if p.From.Reg == REGTMP {
    			c.ctxt.Diag("cannot use REGTMP as source: %v\n", p)
    		}
    		o1 = ADR(1, 0, REGTMP)
    		rel := obj.Addrel(c.cursym)
    		rel.Off = int32(c.pc)
    		rel.Siz = 8
    		rel.Sym = p.To.Sym
    		rel.Add = p.To.Offset
    		// For unaligned access, fall back to adrp + add + movT R, (REGTMP).
    		if o.size(c.ctxt, p) != 8 {

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 15 15:44:14 UTC 2024
    - 201.1K bytes
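
    The case 64 handler shown here lowers a store of a register to a symbol address into an adrp-based sequence: ADRP materialises the page address of the target into REGTMP (the 8-byte relocation recorded here spans both instructions), and the movT then stores through (REGTMP). As the inline comment notes, unaligned accesses fall back to a longer adrp + add + movT form, which is what the following size check distinguishes. The sketch below models that choice with an assumed alignment rule; the real decision lives in o.size in asm7.go.

        package main

        import "fmt"

        // chooseSeq models the choice described by the asm7.go comment above: the
        // two-instruction "adrp + movT R, (REGTMP)" form is used when the target
        // offset is a multiple of the access size, and an extra add is inserted
        // otherwise. The alignment rule here is an assumption for illustration
        // and is not taken from the real o.size logic.
        func chooseSeq(offset, accessSize int64) []string {
        	if offset%accessSize == 0 {
        		return []string{"adrp", "movT R, (REGTMP)"} // 8 bytes of code
        	}
        	return []string{"adrp", "add", "movT R, (REGTMP)"} // 12-byte fallback
        }

        func main() {
        	fmt.Println(chooseSeq(16, 8)) // aligned access: short form
        	fmt.Println(chooseSeq(12, 8)) // unaligned access: fallback with add
        }
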
  3. src/cmd/compile/internal/ssa/_gen/AMD64.rules

    (MOV(Q|L|W|B|O)storeconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd32(off) =>
    	(MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
    
    // We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
    // what variables are being read/written by the ops.
    (MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1] {sym1} (LEAQ [off2] {sym2} base) mem)

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Mar 12 19:38:41 UTC 2024
    - 93.9K bytes
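
    Both rules in this excerpt fold an address computation (an ADDQconst, or a LEAQ in the load rules the comment refers to) into the MOVx operation itself, and each fold is guarded by a check that the combined displacement still fits the instruction's signed 32-bit offset field, which is what canAdd32 verifies for the storeconst rule shown. The sketch below illustrates that guard and the offset arithmetic of the fold; fits32 is a standalone helper written for this example, not the compiler's own function.

        package main

        import "fmt"

        // fits32 plays the role of the 32-bit-displacement side conditions
        // (canAdd32/is32Bit) that gate these folds; it is a standalone helper
        // for illustration, not the compiler's implementation.
        func fits32(n int64) bool {
        	return n == int64(int32(n))
        }

        func main() {
        	// MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem: folding the
        	// ADDQconst adds off to the offset part of sc, so the rewrite may
        	// only fire if the sum still fits in an int32.
        	scOff, off := int64(40), int64(24)
        	if fits32(scOff + off) {
        		fmt.Printf("fold: MOVQstoreconst [off=%d] {s} ptr mem\n", scOff+off)
        	}

        	// The load rules fold a LEAQ the same way; keeping the fold in the
        	// MOVx op itself is what lets live variable analysis see which
        	// variable the op actually reads or writes, as the comment above
        	// explains.
        	off1, off2 := int64(16), int64(8)
        	if fits32(off1 + off2) {
        		fmt.Printf("fold: MOVQload [%d] {sym} base mem\n", off1+off2)
        	}
        }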