- Sort Score
- Results per page: 10
- Languages All
Results 31 - 40 of 44 for dcmpu (0.04 sec)
-
src/cmd/internal/obj/s390x/a.out.go
ACELFBR ACDLFBR ACELGBR ACDLGBR // convert from float/float64 to uint32/uint64 ACLFEBR ACLFDBR ACLGEBR ACLGDBR // compare ACMP ACMPU ACMPW ACMPWU // test under mask ATMHH ATMHL ATMLH ATMLL // insert program mask AIPM // set program mask ASPM // compare and swap ACS ACSG
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Sep 05 16:41:03 UTC 2023 - 12.4K bytes - Viewed (0) -
src/cmd/internal/obj/ppc64/a.out.go
ABVS // Branch if float unordered (also branch on summary overflow) ABDNZ // Decrement CTR, and branch if CTR != 0 ABDZ // Decrement CTR, and branch if CTR == 0 ACMP ACMPU ACMPEQB ACNTLZW ACNTLZWCC ACRAND ACRANDN ACREQV ACRNAND ACRNOR ACROR ACRORN ACRXOR ADIVW ADIVWCC ADIVWVCC ADIVWV ADIVWU ADIVWUCC ADIVWUVCC
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 01 18:50:29 UTC 2024 - 16K bytes - Viewed (0) -
src/crypto/aes/gcm_ppc64x.s
LXVD2X (HTBL)(R0), VXC2 #ifdef GOARCH_ppc64le LVSL (R0)(R0), LEMASK VSPLTISB $0x07, T0 VXOR LEMASK, T0, LEMASK VPERM XL, XL, LEMASK, XL #endif VXOR ZERO, ZERO, ZERO CMPU LEN, $64 BGE gcm_ghash_p8_4x LXVD2X (INP)(R0), VIN ADD $16, INP, INP SUBCCC $16, LEN, LEN #ifdef GOARCH_ppc64le VPERM IN, IN, LEMASK, IN #endif VXOR IN, XL, IN BEQ short
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 27.1K bytes - Viewed (0) -
src/cmd/internal/notsha256/sha256block_ppc64x.s
VADDUWM V13, V3, V3 XXLOR VS29, VS29, V15 VADDUWM V14, V4, V4 XXLOR VS30, VS30, V16 VADDUWM V15, V5, V5 XXLOR VS31, VS31, V17 VADDUWM V16, V6, V6 VADDUWM V17, V7, V7 CMPU INP, END BLT loop LVX (TBL)(R_x000), V8 VPERM V0, V1, KI, V0 LVX (TBL)(R_x010), V9 VPERM V4, V5, KI, V4 VPERM V0, V2, V8, V0 VPERM V4, V6, V8, V4 VPERM V0, V3, V9, V0
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:27 UTC 2023 - 14.5K bytes - Viewed (0) -
src/crypto/sha256/sha256block_ppc64x.s
VADDUWM V13, V3, V3 XXLOR VS29, VS29, V15 VADDUWM V14, V4, V4 XXLOR VS30, VS30, V16 VADDUWM V15, V5, V5 XXLOR VS31, VS31, V17 VADDUWM V16, V6, V6 VADDUWM V17, V7, V7 CMPU INP, END BLT loop LVX (TBL)(R_x000), V8 VPERM V0, V1, KI, V0 LVX (TBL)(R_x010), V9 VPERM V4, V5, KI, V4 VPERM V0, V2, V8, V0 VPERM V4, V6, V8, V4 VPERM V0, V3, V9, V0
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 14.4K bytes - Viewed (0) -
src/crypto/sha512/sha512block_ppc64x.s
XXLOR VS31, VS31, V17 VADDUDM V10, V0, V0 VADDUDM V11, V1, V1 VADDUDM V12, V2, V2 VADDUDM V13, V3, V3 VADDUDM V14, V4, V4 VADDUDM V15, V5, V5 VADDUDM V16, V6, V6 VADDUDM V17, V7, V7 CMPU INP, END BLT loop #ifdef GOARCH_ppc64le VPERM V0, V1, KI, V0 VPERM V2, V3, KI, V2 VPERM V4, V5, KI, V4 VPERM V6, V7, KI, V6 #else VPERM V1, V0, KI, V0 VPERM V3, V2, KI, V2
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 15.8K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/rewritePPC64.go
v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) v.AddArg(v0) return true } // match: (CMPU x y) // cond: canonLessThan(x,y) // result: (InvertFlags (CMPU y x)) for { x := v_0 y := v_1 if !(canonLessThan(x, y)) { break } v.reset(OpPPC64InvertFlags) v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) v0.AddArg2(y, x)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 360.2K bytes - Viewed (0) -
src/hash/crc32/crc32_ppc64le.s
VSLDOI $8,zeroes,V8,V8 // or: VSLDOI V29,V8,V27,4 for top 32 bits? #else VSLDOI $4,V8,zeroes,V8 #endif #ifdef BYTESWAP_DATA MOVD $·byteswapcons(SB),R3 LVX (R3),byteswap #endif CMPU R5,$256 // length of bytes BLT short RLDICR $0,R5,$56,R6 // chunk to process // First step for larger sizes l1: MOVD $32768,R7 MOVD R7,R9 CMP R6,R7 // compare R6, R7 (MAX SIZE)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 06 12:09:50 UTC 2024 - 13.1K bytes - Viewed (0) -
src/cmd/internal/obj/ppc64/asm9.go
{as: ACMP, a1: C_REG, a6: C_S16CON, type_: 70, size: 4}, {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_S16CON, type_: 70, size: 4}, {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4}, {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4}, {as: ACMPU, a1: C_REG, a6: C_U16CON, type_: 70, size: 4}, {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_U16CON, type_: 70, size: 4}, {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 13:55:28 UTC 2024 - 156.1K bytes - Viewed (0) -
src/math/big/arith_ppc64x.s
BEQ done CMP R7, R4 ISEL $0, R7, R4, R7 // Take the lower bound of lengths of x,z SLD $3, R7, R7 SUB R6, R3, R11 // dest - src CMPU R11, R7, CR2 // < len? BLT CR2, backward // there is overlap, copy backwards MOVD $0, R14 // shlVU processes backwards, but added a forward copy option // since its faster on POWER repeat:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 18:17:17 UTC 2024 - 16.8K bytes - Viewed (0)