- Sort Score
- Results per page 10
- Languages All
Results 21 - 30 of 49 for JNE (0.08 sec)
-
src/runtime/time_linux_amd64.s
MOVQ CX, 0(SP) MOVQ DX, 8(SP) LEAQ sec+0(FP), DX MOVQ -8(DX), CX // Sets CX to function return address. MOVQ CX, m_vdsoPC(BX) MOVQ DX, m_vdsoSP(BX) CMPQ R14, m_curg(BX) // Only switch if on curg. JNE noswitch MOVQ m_g0(BX), DX MOVQ (g_sched+gobuf_sp)(DX), SP // Set SP to g0 stack noswitch: SUBQ $32, SP // Space for two time results ANDQ $~15, SP // Align for C code MOVL $0, DI // CLOCK_REALTIME
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat Nov 06 10:24:44 UTC 2021 - 2K bytes - Viewed (0) -
src/internal/bytealg/indexbyte_amd64.s
BSFL DX, DX // Find first set bit. JZ failure // No set bit, failure. MOVQ DX, (R8) RET avx2: #ifndef hasAVX2 CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1 JNE sse #endif MOVD AX, X0 LEAQ -32(SI)(BX*1), R11 VPBROADCASTB X0, Y1 PCALIGN $32 avx2_loop: VMOVDQU (DI), Y2 VPCMPEQB Y1, Y2, Y3 VPTEST Y3, Y3 JNZ avx2success ADDQ $32, DI
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Nov 01 19:06:01 UTC 2023 - 3.1K bytes - Viewed (0) -
src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s
TEXT polyHashADInternal<>(SB), NOSPLIT, $0 // adp points to beginning of additional data // itr2 holds ad length XORQ acc0, acc0 XORQ acc1, acc1 XORQ acc2, acc2 CMPQ itr2, $13 JNE hashADLoop openFastTLSAD: // Special treatment for the TLS case of 13 bytes MOVQ (adp), acc0 MOVQ 5(adp), acc1 SHRQ $24, acc1 MOVQ $1, acc2 polyMul RET hashADLoop:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Nov 29 21:28:33 UTC 2023 - 105.6K bytes - Viewed (0) -
src/cmd/internal/obj/x86/obj6.go
p.To.Type = obj.TYPE_REG p.To.Reg = regEntryTmp0 if ctxt.Arch.Family == sys.I386 { p.As = ATESTL } // JNE checkargp (checkargp to be resolved later) jne := obj.Appendp(p, newprog) jne.As = AJNE jne.To.Type = obj.TYPE_BRANCH // end: // NOP end := obj.Appendp(jne, newprog) end.As = obj.ANOP // Fast forward to end of function. var last *obj.Prog
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 08 18:36:45 UTC 2023 - 40.9K bytes - Viewed (0) -
src/runtime/memmove_386.s
*/ CMPL SI, DI JLS back /* * forward copy loop */ forward: // If REP MOVSB isn't fast, don't use it CMPB internal∕cpu·X86+const_offsetX86HasERMS(SB), $1 // enhanced REP MOVSB/STOSB JNE fwdBy4 // Check alignment MOVL SI, AX ORL DI, AX TESTL $3, AX JEQ fwdBy4 // Do 1 byte at a time MOVL BX, CX REP; MOVSB RET fwdBy4: // Do 4 bytes at a time MOVL BX, CX
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat Nov 06 10:24:44 UTC 2021 - 4.4K bytes - Viewed (0) -
src/internal/bytealg/count_amd64.s
// Directly return DX, we don't need to accumulate // since we have <16 bytes. POPCNTL DX, DX MOVQ DX, (R8) RET avx2: #ifndef hasAVX2 CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1 JNE sse #endif MOVD AX, X0 LEAQ -64(SI)(BX*1), R11 LEAQ (SI)(BX*1), R13 VPBROADCASTB X0, Y1 PCALIGN $32 avx2_loop: VMOVDQU (DI), Y2 VMOVDQU 32(DI), Y4 VPCMPEQB Y1, Y2, Y3
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Oct 06 20:54:43 UTC 2023 - 4.7K bytes - Viewed (0) -
src/math/exp_amd64.s
JGE overflow lastStep: SHLQ $52, BX MOVQ BX, X1 MULSD X1, X0 MOVSD X0, ret+8(FP) RET notFinite: // test bits for -Inf MOVQ $NegInf, AX CMPQ AX, BX JNE notNegInf // -Inf, return 0 underflow: // return 0 MOVQ $0, ret+8(FP) RET overflow: // return +Inf MOVQ $PosInf, BX notNegInf: // NaN or +Inf, return x MOVQ BX, ret+8(FP) RET
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 15 15:48:19 UTC 2021 - 4.2K bytes - Viewed (0) -
src/runtime/sys_plan9_386.s
CALL runtime·exits(SB) JMP 0(PC) // void sigtramp(void *ureg, int8 *note) TEXT runtime·sigtramp(SB),NOSPLIT,$0 get_tls(AX) // check that g exists MOVL g(AX), BX CMPL BX, $0 JNE 3(PC) CALL runtime·badsignal2(SB) // will exit RET // save args MOVL ureg+0(FP), CX MOVL note+4(FP), DX // change stack MOVL g_m(BX), BX MOVL m_gsignal(BX), BP
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 21 22:12:04 UTC 2021 - 4.5K bytes - Viewed (0) -
src/internal/bytealg/index_amd64.s
JB loop33to63 fail_avx2: VZEROUPPER fail: MOVQ $-1, (R11) RET success_avx2: VZEROUPPER JMP success sse42: #ifndef hasSSE42 CMPB internal∕cpu·X86+const_offsetX86HasSSE42(SB), $1 JNE no_sse42 #endif CMPQ AX, $12 // PCMPESTRI is slower than normal compare, // so using it makes sense only if we advance 4+ bytes per compare // This value was determined experimentally and is the ~same
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 07 00:20:48 UTC 2023 - 5.1K bytes - Viewed (0) -
src/runtime/sys_plan9_amd64.s
TEXT runtime·settls(SB),NOSPLIT,$0 RET // void sigtramp(void *ureg, int8 *note) TEXT runtime·sigtramp(SB),NOSPLIT|NOFRAME,$0 get_tls(AX) // check that g exists MOVQ g(AX), BX CMPQ BX, $0 JNE 3(PC) CALL runtime·badsignal2(SB) // will exit RET // save args MOVQ ureg+0(FP), CX MOVQ note+8(FP), DX // change stack MOVQ g_m(BX), BX MOVQ m_gsignal(BX), R10
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Mar 01 16:41:22 UTC 2023 - 4.6K bytes - Viewed (0)