- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 7 of 7 for ANDQ (0.06 sec)
-
src/crypto/internal/edwards25519/field/fe_amd64.s
SHLQ $0x0d, DI, SI SHLQ $0x0d, R9, R8 SHLQ $0x0d, R11, R10 SHLQ $0x0d, R13, R12 SHLQ $0x0d, R15, R14 ANDQ AX, DI IMUL3Q $0x13, R14, R14 ADDQ R14, DI ANDQ AX, R9 ADDQ SI, R9 ANDQ AX, R11 ADDQ R8, R11 ANDQ AX, R13 ADDQ R10, R13 ANDQ AX, R15 ADDQ R12, R15 // Second reduction chain (carryPropagate) MOVQ DI, SI SHRQ $0x33, SI MOVQ R9, R8
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 5.7K bytes - Viewed (0) -
test/codegen/rotate.go
var a uint16 z &= 15 // amd64:"ROLW",-"ANDQ" // riscv64: "OR","SLL","SRL",-"AND\t" a += x<<z | x>>(16-z) // amd64:"RORW",-"ANDQ" // riscv64: "OR","SLL","SRL",-"AND\t" a += x>>z | x<<(16-z) return a } func rot8nc(x uint8, z uint) uint8 { var a uint8 z &= 7 // amd64:"ROLB",-"ANDQ" // riscv64: "OR","SLL","SRL",-"AND\t" a += x<<z | x>>(8-z)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Mar 07 14:57:07 UTC 2024 - 6K bytes - Viewed (0) -
test/codegen/math.go
// s390x:"LPDFR",-"LDEBR",-"LEDBR" (no float64 conversion) return float32(math.Abs(float64(x))) } // Check that it's using integer registers func copysign(a, b, c float64) { // amd64:"BTRQ\t[$]63","ANDQ","ORQ" // s390x:"CPSDR",-"MOVD" (no integer load/store) // ppc64x:"FCPSGN" // riscv64:"FSGNJD" // wasm:"F64Copysign" sink64[0] = math.Copysign(a, b) // amd64:"BTSQ\t[$]63"
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 04 15:24:29 UTC 2024 - 6.2K bytes - Viewed (0) -
src/runtime/sys_windows_amd64.s
TEXT runtime·asmstdcall_trampoline<ABIInternal>(SB),NOSPLIT,$0 MOVQ AX, CX JMP runtime·asmstdcall(SB) // void runtime·asmstdcall(void *c); TEXT runtime·asmstdcall(SB),NOSPLIT,$16 MOVQ SP, AX ANDQ $~15, SP // alignment as per Windows requirement MOVQ AX, 8(SP) MOVQ CX, 0(SP) // asmcgocall will put first argument into CX. MOVQ libcall_fn(CX), AX MOVQ libcall_args(CX), SI MOVQ libcall_n(CX), CX
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Feb 19 07:24:08 UTC 2024 - 8.4K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_amd64.s
MOVQ AX, ret+16(FP) RET // func And64(addr *uint64, v uint64) old uint64 TEXT ·And64(SB), NOSPLIT, $0-24 MOVQ ptr+0(FP), BX MOVQ val+8(FP), CX casloop: MOVQ CX, DX MOVQ (BX), AX ANDQ AX, DX LOCK CMPXCHGQ DX, (BX) JNZ casloop MOVQ AX, ret+16(FP) RET // func Anduintptr(addr *uintptr, v uintptr) old uintptr TEXT ·Anduintptr(SB), NOSPLIT, $0-24 JMP ·And64(SB)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 5.2K bytes - Viewed (0) -
src/crypto/internal/edwards25519/field/_asm/fe_amd64_asm.go
lo = r.lo SHLQ(Imm(64-51), r.lo, r.hi) r.lo, r.hi = nil, nil // make sure the uint128 is unusable return } // maskAndAdd sets r = r&mask + c*i. func maskAndAdd(r, mask, c GPVirtual, i uint64) { ANDQ(mask, r) if i != 1 { IMUL3Q(Imm(i), c, c) } ADDQ(c, r) } func mustAddr(c Component) Op { b, err := c.Resolve() if err != nil { panic(err) } return b.Addr
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 7.2K bytes - Viewed (0) -
test/codegen/bits.go
func bitoff64(a, b uint64) (n uint64) { // amd64:"BTRQ" n += b &^ (1 << (a & 63)) // amd64:"BTRQ\t[$]63" n += a &^ (1 << 63) // amd64:"BTRQ\t[$]60" n += a &^ (1 << 60) // amd64:"ANDQ\t[$]-2" n += a &^ (1 << 0) return n } func bitcompl64(a, b uint64) (n uint64) { // amd64:"BTCQ" n += b ^ (1 << (a & 63)) // amd64:"BTCQ\t[$]63" n += a ^ (1 << 63)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 7.8K bytes - Viewed (0)