- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 8 of 8 for ORQ (0.08 sec)
-
src/cmd/asm/internal/asm/testdata/amd64dynlinkerror.s
RET TEXT ·a9(SB), 0, $0-0 CMPL runtime·writeBarrier(SB), $0 ORQ R15, R15 // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" RET TEXT ·a10(SB), 0, $0-0 CMPL runtime·writeBarrier(SB), $0 JEQ one ORQ R15, R15 // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" one: RET
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Mar 15 20:45:41 UTC 2023 - 4.8K bytes - Viewed (0) -
src/math/hypot_amd64.s
ANDQ AX, CX // q = |q| MOVQ $PosInf, AX CMPQ AX, BX JLE isInfOrNaN CMPQ AX, CX JLE isInfOrNaN // hypot = max * sqrt(1 + (min/max)**2) MOVQ BX, X0 MOVQ CX, X1 ORQ CX, BX JEQ isZero MOVAPD X0, X2 MAXSD X1, X0 MINSD X2, X1 DIVSD X0, X1 MULSD X1, X1 ADDSD $1.0, X1 SQRTSD X1, X1 MULSD X1, X0 MOVSD X0, ret+16(FP) RET isInfOrNaN:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 15 15:48:19 UTC 2021 - 1.1K bytes - Viewed (0) -
src/math/dim_amd64.s
MOVQ $PosInf, AX MOVQ R8, BX ANDQ DX, BX // x = |x| CMPQ AX, BX JLT isMaxNaN MOVQ R9, CX ANDQ DX, CX // y = |y| CMPQ AX, CX JLT isMaxNaN // ±0 special cases ORQ CX, BX JEQ isMaxZero MOVQ R8, X0 MOVQ R9, X1 MAXSD X1, X0 MOVSD X0, ret+16(FP) RET isMaxNaN: // return NaN MOVQ $NaN, AX isPosInf: // return +Inf MOVQ AX, ret+16(FP)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 15 15:48:19 UTC 2021 - 1.9K bytes - Viewed (0) -
test/codegen/README
matched. For example, the following test: func TZ8(n uint8) int { // amd64:"BSFQ","ORQ\t\\$256" return bits.TrailingZeros8(n) } verifies that the code generated for a bits.TrailingZeros8 call on amd64 contains both a "BSFQ" instruction and an "ORQ $256". Note how the ORQ regex includes a tab char (\t). In the Go assembly syntax, operands are separated from opcodes by a tabulation.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jul 18 19:55:29 UTC 2023 - 5.2K bytes - Viewed (0) -
test/codegen/math.go
// s390x:"LPDFR",-"LDEBR",-"LEDBR" (no float64 conversion) return float32(math.Abs(float64(x))) } // Check that it's using integer registers func copysign(a, b, c float64) { // amd64:"BTRQ\t[$]63","ANDQ","ORQ" // s390x:"CPSDR",-"MOVD" (no integer load/store) // ppc64x:"FCPSGN" // riscv64:"FSGNJD" // wasm:"F64Copysign" sink64[0] = math.Copysign(a, b) // amd64:"BTSQ\t[$]63"
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 04 15:24:29 UTC 2024 - 6.2K bytes - Viewed (0) -
src/internal/bytealg/count_amd64.s
MOVQ R11, DI VMOVDQU (DI), Y2 VMOVDQU 32(DI), Y4 VPCMPEQB Y1, Y2, Y3 VPCMPEQB Y1, Y4, Y5 VPMOVMSKB Y3, DX VPMOVMSKB Y5, CX // Exit AVX mode. VZEROUPPER SALQ $32, CX ORQ CX, DX // Create mask to ignore overlap between previous 64 byte block // and the next. ANDQ $63, BX MOVQ $64, CX SUBQ BX, CX MOVQ $0xFFFFFFFFFFFFFFFF, R10 SALQ CL, R10 // Apply mask
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Oct 06 20:54:43 UTC 2023 - 4.7K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_amd64.s
MOVL AX, ret+16(FP) RET // func Or64(addr *uint64, v uint64) old uint64 TEXT ·Or64(SB), NOSPLIT, $0-24 MOVQ ptr+0(FP), BX MOVQ val+8(FP), CX casloop: MOVQ CX, DX MOVQ (BX), AX ORQ AX, DX LOCK CMPXCHGQ DX, (BX) JNZ casloop MOVQ AX, ret+16(FP) RET // func And64(addr *uint64, v uint64) old uint64 TEXT ·And64(SB), NOSPLIT, $0-24 MOVQ ptr+0(FP), BX MOVQ val+8(FP), CX
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 5.2K bytes - Viewed (0) -
test/codegen/bits.go
} func biton64(a, b uint64) (n uint64) { // amd64:"BTSQ" n += b | (1 << (a & 63)) // amd64:"BTSQ\t[$]63" n += a | (1 << 63) // amd64:"BTSQ\t[$]60" n += a | (1 << 60) // amd64:"ORQ\t[$]1" n += a | (1 << 0) return n } func bitoff64(a, b uint64) (n uint64) { // amd64:"BTRQ" n += b &^ (1 << (a & 63)) // amd64:"BTRQ\t[$]63" n += a &^ (1 << 63)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 7.8K bytes - Viewed (0)