- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 12 for jnle (0.1 sec)
-
src/internal/bytealg/count_amd64.s
// Directly return DX, we don't need to accumulate // since we have <16 bytes. POPCNTL DX, DX MOVQ DX, (R8) RET avx2: #ifndef hasAVX2 CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1 JNE sse #endif MOVD AX, X0 LEAQ -64(SI)(BX*1), R11 LEAQ (SI)(BX*1), R13 VPBROADCASTB X0, Y1 PCALIGN $32 avx2_loop: VMOVDQU (DI), Y2 VMOVDQU 32(DI), Y4 VPCMPEQB Y1, Y2, Y3
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Oct 06 20:54:43 UTC 2023 - 4.7K bytes - Viewed (0) -
test/codegen/copy.go
func ptrEqual() { // amd64:-"JEQ",-"JNE" // ppc64x:-"BEQ",-"BNE" // s390x:-"BEQ",-"BNE" copy(x[:], x[:]) } func ptrOneOffset() { // amd64:-"JEQ",-"JNE" // ppc64x:-"BEQ",-"BNE" // s390x:-"BEQ",-"BNE" copy(x[1:], x[:]) } func ptrBothOffset() { // amd64:-"JEQ",-"JNE" // ppc64x:-"BEQ",-"BNE" // s390x:-"BEQ",-"BNE" copy(x[1:], x[2:]) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 22 14:09:29 UTC 2023 - 3.1K bytes - Viewed (0) -
src/crypto/subtle/xor_amd64.s
aligned: MOVQ $0, AX // position in slices PCALIGN $16 loop16b: MOVOU (SI)(AX*1), X0 // XOR 16byte forwards. MOVOU (CX)(AX*1), X1 PXOR X1, X0 MOVOU X0, (BX)(AX*1) ADDQ $16, AX CMPQ DX, AX JNE loop16b RET PCALIGN $16 loop_1b: SUBQ $1, DX // XOR 1byte backwards. MOVB (SI)(DX*1), DI MOVB (CX)(DX*1), AX XORB AX, DI MOVB DI, (BX)(DX*1)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 26 18:14:32 UTC 2023 - 1.4K bytes - Viewed (0) -
src/runtime/time_windows_386.s
MOVL (_INTERRUPT_TIME+time_hi2), DI CMPL AX, DI JNE loop // w = DI:CX // multiply by 100 MOVL $100, AX MULL CX IMULL $100, DI ADDL DI, DX // w*100 = DX:AX MOVL AX, mono+12(FP) MOVL DX, mono+16(FP) wall: MOVL (_SYSTEM_TIME+time_hi1), CX MOVL (_SYSTEM_TIME+time_lo), AX MOVL (_SYSTEM_TIME+time_hi2), DX CMPL CX, DX JNE wall // w = DX:AX
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Sep 07 17:19:45 UTC 2023 - 1.7K bytes - Viewed (0) -
src/runtime/sys_windows_amd64.s
CMPL CX, $0; JE _0args CMPL CX, $1; JE _1args CMPL CX, $2; JE _2args CMPL CX, $3; JE _3args CMPL CX, $4; JE _4args // Check we have enough room for args. CMPL CX, $const_maxArgs JLE 2(PC) INT $3 // not enough room -> crash // Copy args to the stack. MOVQ SP, DI CLD REP; MOVSQ MOVQ SP, SI // Load first 4 args into correspondent registers.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Feb 19 07:24:08 UTC 2024 - 8.4K bytes - Viewed (0) -
src/math/big/arith_amd64.s
MOVQ R12, 8(R10)(SI*8) MOVQ R13, 16(R10)(SI*8) MOVQ R14, 24(R10)(SI*8) SBBQ CX, CX // save CF ADDQ $4, SI // i += 4 SUBQ $4, DI // n -= 4 JGE U1 // if n >= 0 goto U1 V1: ADDQ $4, DI // n += 4 JLE E1 // if n <= 0 goto E1 L1: // n > 0 ADDQ CX, CX // restore CF MOVQ 0(R8)(SI*8), R11 ADCQ 0(R9)(SI*8), R11 MOVQ R11, 0(R10)(SI*8) SBBQ CX, CX // save CF ADDQ $1, SI // i++ SUBQ $1, DI // n--
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:27 UTC 2023 - 9.1K bytes - Viewed (0) -
src/internal/bytealg/equal_amd64.s
// CX = size (want in BX) CMPQ AX, BX JNE neq MOVQ $1, AX // return 1 RET neq: MOVQ AX, SI MOVQ BX, DI MOVQ CX, BX JMP memeqbody<>(SB) // memequal_varlen(a, b unsafe.Pointer) bool TEXT runtime·memequal_varlen<ABIInternal>(SB),NOSPLIT,$0-17 // AX = a (want in SI) // BX = b (want in DI) // 8(DX) = size (want in BX) CMPQ AX, BX JNE neq MOVQ $1, AX // return 1 RET neq:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Nov 17 16:34:40 UTC 2023 - 2.8K bytes - Viewed (0) -
src/internal/bytealg/indexbyte_amd64.s
BSFL DX, DX // Find first set bit. JZ failure // No set bit, failure. MOVQ DX, (R8) RET avx2: #ifndef hasAVX2 CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1 JNE sse #endif MOVD AX, X0 LEAQ -32(SI)(BX*1), R11 VPBROADCASTB X0, Y1 PCALIGN $32 avx2_loop: VMOVDQU (DI), Y2 VPCMPEQB Y1, Y2, Y3 VPTEST Y3, Y3 JNZ avx2success ADDQ $32, DI
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Nov 01 19:06:01 UTC 2023 - 3.1K bytes - Viewed (0) -
src/internal/bytealg/index_amd64.s
JB loop33to63 fail_avx2: VZEROUPPER fail: MOVQ $-1, (R11) RET success_avx2: VZEROUPPER JMP success sse42: #ifndef hasSSE42 CMPB internal∕cpu·X86+const_offsetX86HasSSE42(SB), $1 JNE no_sse42 #endif CMPQ AX, $12 // PCMPESTRI is slower than normal compare, // so using it makes sense only if we advance 4+ bytes per compare // This value was determined experimentally and is the ~same
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 07 00:20:48 UTC 2023 - 5.1K bytes - Viewed (0) -
src/runtime/sys_windows_386.s
MOVL DX, 0(CX)(FS) RET TEXT runtime·nanotime1(SB),NOSPLIT,$0-8 loop: MOVL (_INTERRUPT_TIME+time_hi1), AX MOVL (_INTERRUPT_TIME+time_lo), CX MOVL (_INTERRUPT_TIME+time_hi2), DI CMPL AX, DI JNE loop // wintime = DI:CX, multiply by 100 MOVL $100, AX MULL CX IMULL $100, DI ADDL DI, DX // wintime*100 = DX:AX MOVL AX, ret_lo+0(FP) MOVL DX, ret_hi+4(FP) RET
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Sep 21 15:56:43 UTC 2023 - 6.5K bytes - Viewed (0)