Results 1 - 10 of 87 for cmpb (1.81 sec)
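
Note: this first page of hits mixes two different meanings of "cmpb". In the ppc64 files, CMPB is the PowerPC Compare Bytes instruction: it compares two registers byte by byte and writes 0xFF into each byte of the result where they match, a mask that is then fed to CNTLZW or POPCNTD. In the amd64 and 386 files, CMPB is simply the Go assembler's byte-width form of CMP, used mostly to test one-byte feature flags in internal/cpu or to compare individual trailing bytes.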

  1. src/internal/bytealg/indexbyte_ppc64x.s

    	RET
    
    cmp4:	// Length 4 - 7
    	CMPU	R4,$4
    	BLT	cmp2
    	MOVD	$-4,R11
    	ADD	$-4,R4,R4
    
    	_LWBEX	(R0)(R3),R10
    	_LWBEX	(R11)(R9),R11
    	CMPB	R10,R5,R10
    	CMPB	R11,R5,R11
    	CNTLZW	R10,R10
    	CNTLZW	R11,R11
    	CMPU	R10,$32
    	CMPU	R11,$32,CR1
    	SRD	$3,R10,R3
    	SRD	$3,R11,R11
    	BNE	found
    
    	ADD	R4,R11,R4
    	MOVD	$-1,R3
    	ISEL	CR1EQ,R3,R4,R3
    	RET
    
    cmp2:	// Length 2 - 3
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Apr 21 16:10:29 UTC 2023
    - 6.3K bytes
    - Viewed (0)
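
The branch-free search above works because the CMPB result is a per-byte equality mask. A minimal Go model of the idiom, assuming R5 holds the target byte replicated into every byte lane and that the _LWBEX loads leave the word most-significant-byte first (cmpb32 is a hypothetical helper, not anything in the tree):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // cmpb32 models PPC64 CMPB on a 32-bit lane: each result byte is 0xFF where
    // the corresponding bytes of a and b are equal, 0x00 otherwise.
    func cmpb32(a, b uint32) uint32 {
        var r uint32
        for shift := uint(0); shift < 32; shift += 8 {
            if byte(a>>shift) == byte(b>>shift) {
                r |= 0xFF << shift
            }
        }
        return r
    }

    func main() {
        word := uint32(0x61626364)   // bytes 'a','b','c','d', MSB first
        needle := uint32(0x63636363) // the byte being searched for, 'c', in every lane
        mask := cmpb32(word, needle)
        // CNTLZW then SRD $3 in the snippet: leading zeros / 8 is the offset of the
        // first matching byte; 32 leading zeros (the CMPU ...,$32 checks) means no match.
        fmt.Println(bits.LeadingZeros32(mask) / 8) // prints 2
    }
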
  2. src/internal/bytealg/count_ppc64x.s

    #else
    tail:	// Count the last 0 - 31 bytes.
    	CMP	R4, $16
    	BLT	tail_8
    	MOVD	(R3), R12
    	MOVD	8(R3), R14
    	CMPB	R12, R5, R12
    	CMPB	R14, R5, R14
    	POPCNTD	R12, R12
    	POPCNTD	R14, R14
    	ADD	R12, R18, R18
    	ADD	R14, R18, R18
    	ADD	$16, R3, R3
    	ADD	$-16, R4, R4
    
    tail_8:	// Count the remaining 0 - 15 bytes.
    	CMP	R4, $8
    	BLT	tail_4
    	MOVD	(R3), R12
    	CMPB	R12, R5, R12
    	POPCNTD	R12, R12
    	ADD	R12, R18, R18
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Aug 14 20:30:44 UTC 2023
    - 3.6K bytes
    - Viewed (0)
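
Here the same kind of CMPB mask is counted rather than searched: every matching byte contributes eight set bits, so the POPCNTD totals accumulated into R18 are eight times the number of occurrences (presumably scaled back down by 8 before the count is returned). A short Go sketch, again with a hypothetical helper standing in for the instruction:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // cmpb64 models PPC64 CMPB on full 64-bit registers: 0xFF in every byte lane
    // where a and b hold equal bytes, 0x00 elsewhere.
    func cmpb64(a, b uint64) uint64 {
        var r uint64
        for shift := uint(0); shift < 64; shift += 8 {
            if byte(a>>shift) == byte(b>>shift) {
                r |= 0xFF << shift
            }
        }
        return r
    }

    func main() {
        word := uint64(0x6161626361616161)   // six 'a' bytes out of eight
        needle := uint64(0x6161616161616161) // the counted byte replicated in every lane
        // POPCNTD of the CMPB mask is 8 * (number of matching bytes).
        fmt.Println(bits.OnesCount64(cmpb64(word, needle)) / 8) // prints 6
    }
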
  3. src/internal/bytealg/equal_amd64.s

    //   b in DI
    //   count in BX
    // Output:
    //   result in AX
    TEXT memeqbody<>(SB),NOSPLIT,$0-0
    	CMPQ	BX, $8
    	JB	small
    	CMPQ	BX, $64
    	JB	bigloop
    #ifndef hasAVX2
    	CMPB	internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1
    	JE	hugeloop_avx2
    
    	// 64 bytes at a time using xmm registers
    	PCALIGN $16
    hugeloop:
    	CMPQ	BX, $64
    	JB	bigloop
    	MOVOU	(SI), X0
    	MOVOU	(DI), X1
    	MOVOU	16(SI), X2
    	MOVOU	16(DI), X3
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Nov 17 16:34:40 UTC 2023
    - 2.8K bytes
    - Viewed (0)
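
In this amd64 routine CMPB is nothing exotic: it is the byte-width CMP, reading the one-byte HasAVX2 flag out of internal/cpu and jumping to the AVX2 loop when it is set. Otherwise memeqbody falls through to the xmm path shown, whose comment says it compares 64 bytes per iteration using 16-byte MOVOU loads from SI and DI.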
  4. src/internal/bytealg/equal_386.s

    	MOVL	-4(DI)(BX*1), DX
    	CMPL	CX, DX
    	SETEQ	(AX)
    	RET
    
    small:
    	CMPL	BX, $0
    	JEQ	equal
    
    	LEAL	0(BX*8), CX
    	NEGL	CX
    
    	MOVL	SI, DX
    	CMPB	DX, $0xfc
    	JA	si_high
    
    	// load at SI won't cross a page boundary.
    	MOVL	(SI), SI
    	JMP	si_finish
    si_high:
    	// address ends in 111111xx. Load up to bytes we want, move to correct position.
    	MOVL	-4(SI)(BX*1), SI
    	SHRL	CX, SI
    si_finish:
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Aug 23 21:22:58 UTC 2021
    - 2.1K bytes
    - Viewed (0)
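
The CMPB DX, $0xfc test above is the page-crossing guard for the unaligned 4-byte load: when the low byte of the address is at most 0xfc, the MOVL stays inside one 256-byte block, hence inside one page, so it cannot fault even if fewer than 4 bytes remain. A sketch of the check, assuming DX holds a copy of the load address:

    package main

    import "fmt"

    // mayCrossPage mirrors CMPB DX, $0xfc / JA si_high: a 4-byte load from addr
    // can only cross a page boundary if the address's low byte is above 0xfc,
    // i.e. the address sits in the last three bytes of a 256-byte block.
    func mayCrossPage(addr uintptr) bool {
        return byte(addr) > 0xfc
    }

    func main() {
        fmt.Println(mayCrossPage(0x1000fc)) // false: load the word directly
        fmt.Println(mayCrossPage(0x1000fd)) // true: take the si_high shift path
    }
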
  5. src/internal/bytealg/count_amd64.s

    #include "textflag.h"
    
    TEXT ·Count(SB),NOSPLIT,$0-40
    #ifndef hasPOPCNT
    	CMPB	internal∕cpu·X86+const_offsetX86HasPOPCNT(SB), $1
    	JEQ	2(PC)
    	JMP	·countGeneric(SB)
    #endif
    	MOVQ	b_base+0(FP), SI
    	MOVQ	b_len+8(FP), BX
    	MOVB	c+24(FP), AL
    	LEAQ	ret+32(FP), R8
    	JMP	countbody<>(SB)
    
    TEXT ·CountString(SB),NOSPLIT,$0-32
    #ifndef hasPOPCNT
    	CMPB	internal∕cpu·X86+const_offsetX86HasPOPCNT(SB), $1
    	JEQ	2(PC)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Oct 06 20:54:43 UTC 2023
    - 4.7K bytes
    - Viewed (0)
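
Here again CMPB is the x86 byte compare, this time reading the one-byte HasPOPCNT flag from internal/cpu; when POPCNT is unavailable the code jumps to the portable ·countGeneric instead. The FP offsets then follow Count's Go-side declaration, func Count(b []byte, c byte) int: the slice header occupies bytes 0-23 of the argument frame, c sits at 24, and the int result at 32, which is what the $0-40 frame annotation records.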
  6. src/internal/bytealg/compare_386.s

    	XORL	DX, DX
    	MOVB	(SI)(BX*1), CX
    	CMPB	CX, (DI)(BX*1)
    	SETHI	DX
    	LEAL	-1(DX*2), DX	// convert 1/0 to +1/-1
    	MOVL	DX, (AX)
    	RET
    
    mediumloop:
    	CMPL	BP, $4
    	JBE	_0through4
    	MOVL	(SI), BX
    	MOVL	(DI), CX
    	CMPL	BX, CX
    	JNE	diff4
    	ADDL	$4, SI
    	ADDL	$4, DI
    	SUBL	$4, BP
    	JMP	mediumloop
    
    _0through4:
    	MOVL	-4(SI)(BP*1), BX
    	MOVL	-4(DI)(BP*1), CX
    	CMPL	BX, CX
    	JEQ	allsame
    
    diff4:
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Aug 23 21:22:58 UTC 2021
    - 2.6K bytes
    - Viewed (0)
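
The SETHI / LEAL -1(DX*2) pair at the top converts a byte comparison into the -1/+1 result a compare routine wants, as the inline comment notes: DX becomes 1 when the byte loaded from SI is (unsigned) above the byte at DI and 0 otherwise, and 2*DX-1 maps that to +1 or -1 (this path is presumably only reached once a differing byte has been located). In Go terms:

    package main

    import "fmt"

    // signFromBytes mirrors SETHI DX / LEAL -1(DX*2), DX for two bytes already
    // known to differ: +1 when a > b (unsigned), -1 otherwise.
    func signFromBytes(a, b byte) int {
        dx := 0
        if a > b { // SETHI: "set if above", an unsigned comparison
            dx = 1
        }
        return 2*dx - 1 // the LEAL trick: maps 0 to -1 and 1 to +1
    }

    func main() {
        fmt.Println(signFromBytes('b', 'a')) // 1
        fmt.Println(signFromBytes('a', 'b')) // -1
    }
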
  7. src/internal/bytealg/compare_amd64.s

    TEXT cmpbody<>(SB),NOSPLIT,$0-0
    	CMPQ	SI, DI
    	JEQ	allsame
    	CMPQ	BX, DX
    	MOVQ	DX, R8
    	CMOVQLT	BX, R8 // R8 = min(alen, blen) = # of bytes to compare
    	CMPQ	R8, $8
    	JB	small
    
    	CMPQ	R8, $63
    	JBE	loop
    #ifndef hasAVX2
    	CMPB	internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1
    	JEQ     big_loop_avx2
    	JMP	big_loop
    #else
    	JMP	big_loop_avx2
    #endif
    loop:
    	CMPQ	R8, $16
    	JBE	_0through16
    	MOVOU	(SI), X0
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Aug 18 17:17:01 UTC 2022
    - 4.3K bytes
    - Viewed (0)
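
Two small idioms sit next to each other above: CMPQ BX, DX followed by CMOVQLT computes min(alen, blen) without a branch (the source comment spells this out), and the #ifndef hasAVX2 block is the same internal/cpu feature test seen in the equal and count routines, here choosing between big_loop and big_loop_avx2. The branch-free min in Go form:

    package main

    import "fmt"

    // minLen is the shape of MOVQ DX, R8 / CMOVQLT BX, R8: start from blen and
    // conditionally overwrite it with alen when alen < blen, with no branch taken.
    func minLen(alen, blen int) int {
        n := blen
        if alen < blen {
            n = alen
        }
        return n
    }

    func main() {
        fmt.Println(minLen(3, 9)) // 3
    }
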
  8. test/codegen/memops.go

    var x32 [2]uint32
    var x64 [2]uint64
    
    func compMem1() int {
    	// amd64:`CMPB\tcommand-line-arguments.x\+1\(SB\), [$]0`
    	if x[1] {
    		return 1
    	}
    	// amd64:`CMPB\tcommand-line-arguments.x8\+1\(SB\), [$]7`
    	if x8[1] == 7 {
    		return 1
    	}
    	// amd64:`CMPW\tcommand-line-arguments.x16\+2\(SB\), [$]7`
    	if x16[1] == 7 {
    		return 1
    	}
    	// amd64:`CMPL\tcommand-line-arguments.x32\+4\(SB\), [$]7`
    	if x32[1] == 7 {
    		return 1
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Aug 04 16:40:24 UTC 2023
    - 12.5K bytes
    - Viewed (0)
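
This hit is a compiler test rather than an implementation: in test/codegen, each // amd64:`...` comment is a regular expression that the harness matches against the assembly generated for the statement that follows. The patterns here assert that comparing an element of a package-level array against a constant compiles to a single CMPB/CMPW/CMPL against the array's memory at a fixed offset from the symbol, with no separate load (x and x8 are presumably the bool and uint8 arrays declared just above the excerpt).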
  9. src/runtime/memclr_amd64.s

    	TESTQ	BX, BX
    	JEQ	_0
    	CMPQ	BX, $2
    	JBE	_1or2
    	CMPQ	BX, $4
    	JBE	_3or4
    	CMPQ	BX, $8
    	JB	_5through7
    	JE	_8
    	CMPQ	BX, $16
    	JBE	_9through16
    	CMPQ	BX, $32
    	JBE	_17through32
    	CMPQ	BX, $64
    	JBE	_33through64
    	CMPQ	BX, $128
    	JBE	_65through128
    	CMPQ	BX, $256
    	JBE	_129through256
    
    	CMPB	internal∕cpu·X86+const_offsetX86HasERMS(SB), $1 // enhanced REP MOVSB/STOSB
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue May 10 20:52:34 UTC 2022
    - 4.9K bytes
    - Viewed (0)
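
The ladder of CMPQ/JBE pairs dispatches on the byte count in BX to a clearing routine specialized for each size class; only lengths above 256 bytes reach the final CMPB, which once more reads an internal/cpu flag (HasERMS, "enhanced REP MOVSB/STOSB") to decide whether REP STOSB is worthwhile. Read as Go, the dispatch is roughly:

    package main

    import "fmt"

    // sizeClass is a Go rendering of the CMPQ/JBE ladder, assuming BX holds the
    // number of bytes to clear; the return values echo the assembly labels.
    func sizeClass(n int) string {
        switch {
        case n == 0:
            return "_0"
        case n <= 2:
            return "_1or2"
        case n <= 4:
            return "_3or4"
        case n < 8:
            return "_5through7"
        case n == 8:
            return "_8"
        case n <= 16:
            return "_9through16"
        case n <= 32:
            return "_17through32"
        case n <= 64:
            return "_33through64"
        case n <= 128:
            return "_65through128"
        case n <= 256:
            return "_129through256"
        default:
            return "large: consult the ERMS flag"
        }
    }

    func main() {
        fmt.Println(sizeClass(100)) // _65through128
    }
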
  10. src/cmd/compile/internal/ssa/_gen/AMD64splitload.rules

    (CMP(Q|L|W|B)loadidx1 {sym} [off] ptr idx x mem) => (CMP(Q|L|W|B) (MOV(Q|L|W|B)loadidx1 {sym} [off] ptr idx mem) x)
    (CMPQloadidx8 {sym} [off] ptr idx x mem) => (CMPQ (MOVQloadidx8 {sym} [off] ptr idx mem) x)
    (CMPLloadidx4 {sym} [off] ptr idx x mem) => (CMPL (MOVLloadidx4 {sym} [off] ptr idx mem) x)
    (CMPWloadidx2 {sym} [off] ptr idx x mem) => (CMPW (MOVWloadidx2 {sym} [off] ptr idx mem) x)
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Oct 04 19:35:46 UTC 2022
    - 3.4K bytes
    - Viewed (0)
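
The last hit shows CMPB from the compiler's side: these rewrite rules describe how a fused compare-with-memory-operand op such as CMPBloadidx1 is split back into an explicit indexed load (MOVBloadidx1) feeding an ordinary register CMPB, and likewise for the W, L, and Q widths. The split form is presumably needed when the flags result of the fused op cannot be used directly and has to be regenerated.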