Results 61 - 70 of 465 for movbe (0.07 sec)

  1. src/runtime/memmove_mipsx.s

    	JMP	f_words_ua
    
    f_tail_ua:
    	BEQ	R1, R5, ret
    	MOVWHI	-4(R4), R8
    	MOVWLO	-1(R4), R8
    	MOVWLO	R8, -1(R5)
    	JMP	ret
    
    f_small_copy:
    	BEQ	R1, R5, ret
    	ADDU	$1, R1
    	MOVB	0(R2), R6
    	ADDU	$1, R2
    	MOVB	R6, -1(R1)
    	JMP	f_small_copy
    
    backward:
    	SGTU	$4, R3, R6
    	BNE	R6, b_small_copy
    
    	AND	$3, R5, R6
    	BEQ	R6, b_dest_aligned
    	MOVWHI	-4(R4), R7
    	SUBU	R6, R3
    	MOVWLO	-1(R4), R7
    - Last Modified: Sat Nov 06 10:24:44 UTC 2021
    - 4.4K bytes
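
    The f_small_copy loop is memmove's byte-at-a-time fallback for copies too short to be worth word-aligning. A minimal Go sketch of the same forward loop (smallCopy is a hypothetical name; the real routine is the assembly itself):

        package main

        import "fmt"

        // smallCopy mirrors f_small_copy above: advance one byte per iteration,
        // loading with a byte-sized read and storing it before moving on.
        func smallCopy(dst, src []byte) {
            n := len(src)
            if len(dst) < n {
                n = len(dst)
            }
            for i := 0; i < n; i++ {
                dst[i] = src[i]
            }
        }

        func main() {
            dst := make([]byte, 5)
            smallCopy(dst, []byte("gopher"))
            fmt.Println(string(dst)) // gophe
        }
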
  2. src/internal/bytealg/compare_amd64.s

    	JMP	diff16
    diff48:
    	ADDQ	$32, SI
    	ADDQ	$32, DI
    	JMP	diff16
    diff32:
    	ADDQ	$16, SI
    	ADDQ	$16, DI
    	// AX = bit mask of differences
    diff16:
    	BSFQ	AX, BX	// index of first byte that differs
    	XORQ	AX, AX
    	MOVB	(SI)(BX*1), CX
    	CMPB	CX, (DI)(BX*1)
    	SETHI	AX
    	LEAQ	-1(AX*2), AX	// convert 1/0 to +1/-1
    	RET
    
    	// 0 through 16 bytes left, alen>=8, blen>=8
    _0through16:
    	CMPQ	R8, $8
    	JBE	_0through8
    	MOVQ	(SI), AX
    - Last Modified: Thu Aug 18 17:17:01 UTC 2022
    - 4.3K bytes
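
    The diff16 tail locates the first differing byte with BSFQ, then the SETHI/LEAQ pair folds the unsigned byte comparison into a -1/+1 result without a branch. A hedged Go sketch of just that 1/0 to +1/-1 mapping (cmpTail is a hypothetical helper, not the runtime's code):

        package main

        import "fmt"

        // cmpTail models the SETHI + LEAQ -1(AX*2) pair: ax is 1 when a is
        // unsigned-above b, 0 otherwise, and 2*ax-1 maps that onto +1/-1.
        func cmpTail(a, b byte) int {
            ax := 0
            if a > b {
                ax = 1
            }
            return 2*ax - 1
        }

        func main() {
            fmt.Println(cmpTail('a', 'b'), cmpTail('b', 'a')) // -1 1
        }
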
  3. src/internal/bytealg/indexbyte_arm64.s

    #include "textflag.h"
    
    TEXT ·IndexByte(SB),NOSPLIT,$0-40
    	MOVD	b_base+0(FP), R0
    	MOVD	b_len+8(FP), R2
    	MOVBU	c+24(FP), R1
    	MOVD	$ret+32(FP), R8
    	B	indexbytebody<>(SB)
    
    TEXT ·IndexByteString(SB),NOSPLIT,$0-32
    	MOVD	s_base+0(FP), R0
    	MOVD	s_len+8(FP), R2
    	MOVBU	c+16(FP), R1
    	MOVD	$ret+24(FP), R8
    	B	indexbytebody<>(SB)
    
    // input:
    //   R0: data
    //   R1: byte to search
    //   R2: data len
    - Last Modified: Thu Nov 08 20:52:47 UTC 2018
    - 3.3K bytes
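
    These entry points implement the internal bytealg routines that back the standard library's byte search; on arm64 both the slice and string forms branch to the shared indexbytebody. A small usage sketch through the public wrappers (assuming the usual bytes/strings APIs):

        package main

        import (
            "bytes"
            "fmt"
            "strings"
        )

        func main() {
            // bytes.IndexByte and strings.IndexByte are the public entry points
            // that reach IndexByte/IndexByteString above on arm64.
            fmt.Println(bytes.IndexByte([]byte("gopher"), 'p')) // 2
            fmt.Println(strings.IndexByte("gopher", 'x'))       // -1
        }
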
  4. src/runtime/tls_riscv64.s

    #include "funcdata.h"
    #include "textflag.h"
    
    // If !iscgo, this is a no-op.
    //
    // NOTE: mcall() assumes this clobbers only X31 (REG_TMP).
    TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0-0
    #ifndef GOOS_openbsd
    	MOVB	runtime·iscgo(SB), X31
    	BEQZ	X31, nocgo
    #endif
    	MOV	g, runtime·tls_g(SB)
    nocgo:
    	RET
    
    TEXT runtime·load_g(SB),NOSPLIT|NOFRAME,$0-0
    	MOV	runtime·tls_g(SB), g
    	RET
    
    - Last Modified: Wed Oct 04 02:55:17 UTC 2023
    - 615 bytes
  5. src/runtime/cgo/asm_arm.s

    	// Finally, save the link register R14. This also puts the
    	// arguments we pushed for cgocallback where they need to be,
    	// starting at 4(R13).
    	MOVW.W	R14, -4(R13)
    
    	// Skip floating point registers if goarmsoftfp!=0.
    	MOVB    runtime·goarmsoftfp(SB), R11
    	CMP     $0, R11
    	BNE     skipfpsave
    	MOVD	F8, (13*4+8*1)(R13)
    	MOVD	F9, (13*4+8*2)(R13)
    	MOVD	F10, (13*4+8*3)(R13)
    	MOVD	F11, (13*4+8*4)(R13)
    	MOVD	F12, (13*4+8*5)(R13)
    - Last Modified: Mon Nov 20 17:19:36 UTC 2023
    - 2.2K bytes
  6. src/internal/runtime/atomic/atomic_amd64.s

    	JMP	·Store64(SB)
    
    // void	·Or8(byte volatile*, byte);
    TEXT ·Or8(SB), NOSPLIT, $0-9
    	MOVQ	ptr+0(FP), AX
    	MOVB	val+8(FP), BX
    	LOCK
    	ORB	BX, (AX)
    	RET
    
    // void	·And8(byte volatile*, byte);
    TEXT ·And8(SB), NOSPLIT, $0-9
    	MOVQ	ptr+0(FP), AX
    	MOVB	val+8(FP), BX
    	LOCK
    	ANDB	BX, (AX)
    	RET
    
    // func Or(addr *uint32, v uint32)
    TEXT ·Or(SB), NOSPLIT, $0-12
    	MOVQ	ptr+0(FP), AX
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 5.2K bytes
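
    Or8 and And8 are single-byte atomic read-modify-write operations: the LOCK prefix makes the ORB/ANDB update indivisible. The internal package is not importable from user code, so here is a hedged sketch of the same atomic-OR semantics built on sync/atomic's compare-and-swap (orUint32 is a hypothetical helper working on a 32-bit word rather than a byte):

        package main

        import (
            "fmt"
            "sync/atomic"
        )

        // orUint32 atomically ORs mask into *addr via a CAS loop, the same
        // read-modify-write effect as the LOCK ORB sequence above.
        func orUint32(addr *uint32, mask uint32) {
            for {
                old := atomic.LoadUint32(addr)
                if atomic.CompareAndSwapUint32(addr, old, old|mask) {
                    return
                }
            }
        }

        func main() {
            var flags uint32
            orUint32(&flags, 0b0101)
            orUint32(&flags, 0b0010)
            fmt.Printf("%04b\n", flags) // 0111
        }
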
  7. src/cmd/internal/obj/arm64/anames.go

    	"LDPSW",
    	"LDPW",
    	"LDXP",
    	"LDXPW",
    	"LDXR",
    	"LDXRB",
    	"LDXRH",
    	"LDXRW",
    	"LSL",
    	"LSLW",
    	"LSR",
    	"LSRW",
    	"MADD",
    	"MADDW",
    	"MNEG",
    	"MNEGW",
    	"MOVB",
    	"MOVBU",
    	"MOVD",
    	"MOVH",
    	"MOVHU",
    	"MOVK",
    	"MOVKW",
    	"MOVN",
    	"MOVNW",
    	"MOVP",
    	"MOVPD",
    	"MOVPQ",
    	"MOVPS",
    	"MOVPSW",
    	"MOVPW",
    	"MOVW",
    	"MOVWU",
    	"MOVZ",
    - Last Modified: Thu May 18 01:40:37 UTC 2023
    - 5.4K bytes
  8. src/runtime/tls_loong64.s

    #include "go_asm.h"
    #include "go_tls.h"
    #include "funcdata.h"
    #include "textflag.h"
    
    // If !iscgo, this is a no-op.
    //
    // NOTE: mcall() assumes this clobbers only R30 (REGTMP).
    TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0-0
    	MOVB	runtime·iscgo(SB), R30
    	BEQ	R30, nocgo
    
    	MOVV	g, runtime·tls_g(SB)
    
    nocgo:
    	RET
    
    TEXT runtime·load_g(SB),NOSPLIT|NOFRAME,$0-0
    	MOVV	runtime·tls_g(SB), g
    	RET
    
    - Last Modified: Tue May 17 20:18:25 UTC 2022
    - 589 bytes
  9. src/crypto/aes/asm_s390x.s

    loop:
    	MOVD	0(R2)(R5*1), R7
    	MOVD	0(R3)(R5*1), R8
    	XOR	R7, R8
    	MOVD	R8, 0(R1)(R5*1)
    	LAY	8(R5), R5
    	SUB	$8, R4
    	CMPBGE	R4, $8, loop
    tail:
    	CMPBEQ	R4, $0, done
    	MOVB	0(R2)(R5*1), R7
    	MOVB	0(R3)(R5*1), R8
    	XOR	R7, R8
    	MOVB	R8, 0(R1)(R5*1)
    	LAY	1(R5), R5
    	SUB	$1, R4
    	BR	tail
    done:
    	RET
    
    // func cryptBlocksGCM(fn code, key, dst, src, buf []byte, cnt *[16]byte)
    TEXT ·cryptBlocksGCM(SB),NOSPLIT,$0-112
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 4.4K bytes
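
    The loop XORs the inputs eight bytes at a time and falls back to single-byte MOVB loads in the tail once fewer than eight bytes remain. A plain Go sketch of the operation, byte-wise only (xorBytes is a hypothetical name, not the crypto/aes routine):

        package main

        import "fmt"

        // xorBytes writes a XOR b into dst; the assembly does the same work,
        // word-at-a-time first and byte-at-a-time for the remainder.
        func xorBytes(dst, a, b []byte) {
            for i := range dst {
                dst[i] = a[i] ^ b[i]
            }
        }

        func main() {
            dst := make([]byte, 3)
            xorBytes(dst, []byte{0xff, 0x0f, 0xf0}, []byte{0x0f, 0x0f, 0x0f})
            fmt.Printf("% x\n", dst) // f0 00 ff
        }
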
  10. test/fixedbugs/issue4396a.go

    // Use of this source code is governed by a BSD-style
    // license that can be found in the LICENSE file.
    
    // Issue 4396. Arrays of bytes are not required to be
    // word aligned. 5g should use MOVB to load the address
    // of s.g[0] for its nil check.
    //
    // This test _may_ fail on arm, but requires the host to 
    // trap unaligned loads. This is generally done with
    //
    // echo "4" > /proc/cpu/alignment
    
    - Last Modified: Mon May 02 13:43:18 UTC 2016
    - 627 bytes
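
    The issue the test covers is that a byte-array field may start at an odd offset, so the compiler's nil-check load of its address must be byte-sized (MOVB) rather than word-sized on arm. A minimal sketch of that kind of layout, not the actual test body:

        package main

        // t.g starts at offset 2, so &s.g[0] is not word aligned; the nil-check
        // load in main must therefore not assume word alignment.
        type t struct {
            pad uint16
            g   [1]byte
        }

        var s *t

        func main() {
            defer func() { recover() }() // s is nil, so the load panics; swallow it
            _ = s.g[0]
        }
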