Results 1 - 10 of 11 for CMPBconstload (0.22 sec)

  1. src/cmd/compile/internal/ssa/_gen/386splitload.rules

    (CMPLconstload {sym} [vo] ptr mem) => (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()])
    (CMPWconstload {sym} [vo] ptr mem) => (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()])
    - Last Modified: Tue Oct 04 19:35:46 UTC 2022
    - 620 bytes
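
    Note: the snippet is truncated just before the byte-width rule that the
    query actually matches. Judging from the generated rewrite shown in
    result 3 below, that rule reads:

        (CMPBconstload {sym} [vo] ptr mem) => (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()])
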
  2. src/cmd/compile/internal/ssa/_gen/AMD64splitload.rules

    //
    // For addressingmodes, certain single instructions are slower than the two instruction
    // split generated here (which is different from the inputs to addressingmodes).
    // For example:
    // (CMPBconstload c (ADDQ x y)) -> (CMPBconstloadidx1 c x y) -> (CMPB c (MOVBloadidx1 x y))
    
    (CMP(Q|L|W|B)load {sym} [off] ptr x mem) => (CMP(Q|L|W|B) (MOV(Q|L|W|B)load {sym} [off] ptr mem) x)
    
    - Last Modified: Tue Oct 04 19:35:46 UTC 2022
    - 3.4K bytes
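
    Note: the (Q|L|W|B) alternation in the rules DSL expands element-wise, so
    the byte instance of the rule above is:

        (CMPBload {sym} [off] ptr x mem) => (CMPB (MOVBload {sym} [off] ptr mem) x)
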
  3. src/cmd/compile/internal/ssa/rewrite386splitload.go

    	}
    	return false
    }
    func rewriteValue386splitload_Op386CMPBconstload(v *Value) bool {
    	v_1 := v.Args[1]
    	v_0 := v.Args[0]
    	b := v.Block
    	typ := &b.Func.Config.Types
    	// match: (CMPBconstload {sym} [vo] ptr mem)
    	// result: (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()])
    	for {
    		vo := auxIntToValAndOff(v.AuxInt)
    		sym := auxToSym(v.Aux)
    		ptr := v_0
    		mem := v_1
    		v.reset(Op386CMPBconst)
    - Last Modified: Thu Jan 19 22:42:34 UTC 2023
    - 4K bytes
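
    Note: the function is cut off right after v.reset. Rewrite functions
    emitted by the _gen tooling follow a fixed shape, so the remainder most
    likely materializes the MOVBload and rewires the value, roughly (a sketch,
    not the verbatim file):

        v.AuxInt = int8ToAuxInt(vo.Val8())                   // low byte of the packed value
        v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)   // the split-out load
        v0.AuxInt = int32ToAuxInt(vo.Off())
        v0.Aux = symToAux(sym)
        v0.AddArg2(ptr, mem)
        v.AddArg(v0)
        return true
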
  4. src/cmd/compile/internal/ssa/rewriteAMD64splitload.go

    	}
    	return false
    }
    func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool {
    	v_1 := v.Args[1]
    	v_0 := v.Args[0]
    	b := v.Block
    	typ := &b.Func.Config.Types
    	// match: (CMPBconstload {sym} [vo] ptr mem)
    	// cond: vo.Val() == 0
    	// result: (TESTB x:(MOVBload {sym} [vo.Off()] ptr mem) x)
    	for {
    		vo := auxIntToValAndOff(v.AuxInt)
    		sym := auxToSym(v.Aux)
    		ptr := v_0
    		mem := v_1
    - Last Modified: Thu Jan 19 22:42:34 UTC 2023
    - 21.4K bytes
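
    Note: this variant only fires when the compared constant is zero,
    replacing a compare-with-zero by a self-test. Given the match/cond/result
    comments, the truncated body presumably continues along these lines
    (a sketch in the standard generated style):

        if !(vo.Val() == 0) {
            break
        }
        v.reset(OpAMD64TESTB)
        x := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)  // the split-out load
        x.AuxInt = int32ToAuxInt(vo.Off())
        x.Aux = symToAux(sym)
        x.AddArg2(ptr, mem)
        v.AddArg2(x, x)                                      // TESTB x, x
        return true
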
  5. src/cmd/compile/internal/ssa/addressingmodes.go

    			if needSplit[c] {
    				// It turns out that some of the combined instructions have faster two-instruction equivalents,
    				// but not the two instructions that led to them being combined here.  For example
    				// (CMPBconstload c (ADDQ x y)) -> (CMPBconstloadidx1 c x y) -> (CMPB c (MOVBloadidx1 x y))
    				// The final pair of instructions turns out to be notably faster, at least in some benchmarks.
    				f.Config.splitLoad(v)
    			}
    		}
    	}
    }
    - Last Modified: Wed Jul 26 17:19:57 UTC 2023
    - 24.3K bytes
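
    Note: needSplit is the table that drives this branch. It is not shown in
    the snippet; conceptually it is a set of combined opcodes whose
    two-instruction split is faster, something like the following sketch
    (the exact contents live elsewhere in addressingmodes.go):

        // Combined ops that are slower than their split equivalents;
        // membership triggers f.Config.splitLoad(v).
        var needSplit = map[Op]bool{
            OpAMD64CMPBconstloadidx1: true, // the case named in the comment
            // ... further combined compare-load variants
        }
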
  6. src/cmd/compile/internal/ssa/_gen/386Ops.go

    		{name: "CMPWconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPW", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
    		{name: "CMPBconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPB", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
    
    - Last Modified: Tue Mar 14 08:10:32 UTC 2023
    - 45.1K bytes
  7. src/cmd/compile/internal/ssa/_gen/AMD64Ops.go

    		{name: "CMPWconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPW", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
    		{name: "CMPBconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPB", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
    
    		// CMPxloadidx: compare *(arg0+N*arg1+auxint+aux) to arg2 (in that order). arg3=mem.
    - Last Modified: Fri Aug 04 16:40:24 UTC 2023
    - 98K bytes
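
    Note: both op tables declare aux: "SymValAndOff", meaning the AuxInt packs
    the comparison value and the load offset into one int64: value in the high
    32 bits, offset in the low 32. That layout is what the Val()/Val16()/
    Val8()/Off() accessors in the rules decode. A minimal sketch:

        type ValAndOff int64

        func (x ValAndOff) Val() int32   { return int32(int64(x) >> 32) }
        func (x ValAndOff) Val16() int16 { return int16(int64(x) >> 32) } // low 16 bits of the value
        func (x ValAndOff) Val8() int8   { return int8(int64(x) >> 32) }  // low 8 bits of the value
        func (x ValAndOff) Off() int32   { return int32(x) }
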
  8. src/cmd/compile/internal/ssa/_gen/AMD64.rules

    (CMPWload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
    (CMPBload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
    
    (TEST(Q|L|W|B)  l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) l2)
        && l == l2
        && l.Uses == 2
        && clobber(l) =>
    - Last Modified: Tue Mar 12 19:38:41 UTC 2024
    - 93.9K bytes
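
    Note: makeValAndOff builds that packed aux value, and the int32(int8(c))
    in the CMPBconstload rule keeps only the low, sign-extended byte of the
    constant, since CMPB compares a single byte. A sketch consistent with the
    packing above:

        func makeValAndOff(val, off int32) ValAndOff {
            return ValAndOff(int64(val)<<32 + int64(uint32(off)))
        }

        // Byte truncation in the rule: c = 0x1FF -> int8(c) = -1,
        // so int32(int8(c)) stores -1 as the comparison value.
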
  9. src/cmd/compile/internal/ssa/rewriteAMD64.go

    	}
    	return false
    }
    func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
    	v_1 := v.Args[1]
    	v_0 := v.Args[0]
    	// match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
    	// cond: ValAndOff(valoff1).canAdd32(off2)
    	// result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
    	for {
    		valoff1 := auxIntToValAndOff(v.AuxInt)
    		sym := auxToSym(v.Aux)
    - Last Modified: Tue Mar 12 19:38:41 UTC 2024
    - 712.7K bytes
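
    Note: this rule folds an ADDQconst into the load's offset. canAdd32 guards
    against the combined offset overflowing 32 bits, and addOffset32 repacks
    the aux value; hedged sketches of the two helpers, assuming the packing
    above:

        func (x ValAndOff) canAdd32(off int32) bool {
            newoff := int64(x.Off()) + int64(off)
            return newoff == int64(int32(newoff)) // still representable in 32 bits
        }

        func (x ValAndOff) addOffset32(off int32) ValAndOff {
            return makeValAndOff(x.Val(), x.Off()+off)
        }
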
  10. src/cmd/compile/internal/ssa/rewrite386.go

    		}
    		x := v_0
    		v.reset(Op386TESTB)
    		v.AddArg2(x, x)
    		return true
    	}
    	// match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
    	// cond: l.Uses == 1 && clobber(l)
    	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
    	for {
    		c := auxIntToInt8(v.AuxInt)
    		l := v_0
    		if l.Op != Op386MOVBload {
    			break
    		}
    		off := auxIntToInt32(l.AuxInt)
    		sym := auxToSym(l.Aux)
    - Last Modified: Fri Apr 21 21:05:46 UTC 2023
    - 262.4K bytes
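
    Note: the @l.Block prefix in the result places the new CMPBconstload in
    the load's block rather than the compare's. The truncated body presumably
    finishes in the standard generated style, roughly:

        ptr := l.Args[0]
        mem := l.Args[1]
        if !(l.Uses == 1 && clobber(l)) {
            break
        }
        b = l.Block // b := v.Block is declared earlier in the function
        v0 := b.NewValue0(l.Pos, Op386CMPBconstload, types.TypeFlags)
        v.copyOf(v0)
        v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
        v0.Aux = symToAux(sym)
        v0.AddArg2(ptr, mem)
        return true
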