- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 6 of 6 for ANDQ (0.18 sec)
-
src/crypto/sha512/sha512block_amd64.s
#define SHA512T2(a, b, c) \ MOVQ a, DI; \ MOVQ c, BX; \ RORQ $28, DI; \ MOVQ a, DX; \ ANDQ b, BX; \ RORQ $34, DX; \ MOVQ a, CX; \ ANDQ c, CX; \ XORQ DX, DI; \ XORQ CX, BX; \ MOVQ a, DX; \ MOVQ b, CX; \ RORQ $39, DX; \ ANDQ a, CX; \ XORQ CX, BX; \ XORQ DX, DI; \ ADDQ DI, BX // Calculate T1 and T2, then e = d + T1 and a = T1 + T2.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 27K bytes - Viewed (0) -
src/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Apr 10 16:37:53 UTC 2024 - 14.2K bytes - Viewed (0) -
test/codegen/shift.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 21 18:53:43 UTC 2024 - 12.7K bytes - Viewed (0) -
src/runtime/race_amd64.s
MOVQ g_m(R14), R13 // Switch to g0 stack. MOVQ SP, R12 // callee-saved, preserved across the CALL MOVQ m_g0(R13), R10 CMPQ R10, R14 JE call // already on g0 MOVQ (g_sched+gobuf_sp)(R10), SP call: ANDQ $~15, SP // alignment for gcc ABI CALL AX MOVQ R12, SP // Back to Go world, set special registers. // The g register (R14) is preserved in C. XORPS X15, X15 RET
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:37:29 UTC 2024 - 15.1K bytes - Viewed (0) -
src/crypto/internal/nistec/p256_asm_amd64.s
SBBQ $0, mul0 MOVQ acc4, acc0 MOVQ acc5, acc1 MOVQ acc6, acc2 MOVQ acc7, acc3 ADDQ $-1, acc4 ADCQ p256const0<>(SB), acc5 ADCQ $0, acc6 ADCQ p256const1<>(SB), acc7 ANDQ $1, mul0 CMOVQEQ acc0, acc4 CMOVQEQ acc1, acc5 CMOVQEQ acc2, acc6 CMOVQEQ acc3, acc7 RET /* ---------------------------------------*/ TEXT p256MulInternal(SB),NOSPLIT,$8
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 39.8K bytes - Viewed (0) -
test/codegen/comparisons.go
// amd64:`TESTL`,-`ANDL` c0 := a&b < 0 // arm:`CMN`,-`ADD` // arm64:`CMNW`,-`ADD` c1 := a+b < 0 // arm:`TEQ`,-`XOR` c2 := a^b < 0 // arm64:`TST`,-`AND` // amd64:`TESTQ`,-`ANDQ` c3 := e&f < 0 // arm64:`CMN`,-`ADD` c4 := e+f < 0 // not optimized to single CMNW/CMN due to further use of b+d // arm64:`ADD`,-`CMNW` // arm:`ADD`,-`CMN` c5 := b+d == 0
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 19 16:31:02 UTC 2024 - 15.2K bytes - Viewed (0)