- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 12 for mfvsrwz (0.15 sec)
-
src/cmd/internal/obj/ppc64/anames.go
"STXVW4X", "STXVH8X", "STXVB16X", "STXVX", "LXSDX", "STXSDX", "LXSIWAX", "LXSIWZX", "STXSIWX", "MFVSRD", "MFFPRD", "MFVRD", "MFVSRWZ", "MFVSRLD", "MTVSRD", "MTFPRD", "MTVRD", "MTVSRWA", "MTVSRWZ", "MTVSRDD", "MTVSRWS", "XXLAND", "XXLANDC", "XXLEQV", "XXLNAND", "XXLOR", "XXLORC", "XXLNOR", "XXLORQ", "XXLXOR",
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 01 18:50:29 UTC 2024 - 6.7K bytes - Viewed (0) -
src/cmd/asm/internal/asm/testdata/ppc64.s
STXSIWX VS1, (R3) // 7c201918 MFVSRD VS1, R3 // 7c230066 MTFPRD R3, F0 // 7c030166 MFVRD V0, R3 // 7c030067 MFVSRLD VS63,R4 // 7fe40267 MFVSRLD V31,R4 // 7fe40267 MFVSRWZ VS33,R4 // 7c2400e7 MFVSRWZ V1,R4 // 7c2400e7 MTVSRD R3, VS1 // 7c230166
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 21:53:50 UTC 2024 - 50.2K bytes - Viewed (0) -
src/cmd/internal/obj/ppc64/asm9.go
opset(ASTXSIWX, r0) case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */ opset(AMFFPRD, r0) opset(AMFVRD, r0) opset(AMFVSRWZ, r0) opset(AMFVSRLD, r0) case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */ opset(AMTFPRD, r0) opset(AMTVRD, r0) opset(AMTVSRWA, r0) opset(AMTVSRWZ, r0) opset(AMTVSRWS, r0)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 13:55:28 UTC 2024 - 156.1K bytes - Viewed (0) -
src/internal/bytealg/count_ppc64x.s
VADDUDM V0, V4, V4 // Accumulate the popcounts. They are 8x the count. VADDUDM V2, V5, V5 // The count will be fixed up afterwards. ADD $32, R3 BDNZ cmploop VADDUDM V4, V5, V5 MFVSRD V5, R18 VSLDOI $8, V5, V5, V5 MFVSRD V5, R21 ADD R21, R18, R18 ANDCC $31, R4, R4 // Skip the tail processing if no bytes remaining. BEQ tail_0 #ifdef GOPPC64_power10
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 14 20:30:44 UTC 2023 - 3.6K bytes - Viewed (0) -
src/internal/bytealg/compare_ppc64x.s
VPERM V3,V3,SWAP,V3 VPERM V4,V4,SWAP,V4 #endif MFVSRD VS35,R16 // move upper doublewords of A and B into GPR for comparison MFVSRD VS36,R10 CMPU R16,R10 BEQ lower SETB_CR0_NE(R3) RET PCALIGN $16 lower: VSLDOI $8,V3,V3,V3 // move lower doublewords of A and B into GPR for comparison MFVSRD VS35,R16 VSLDOI $8,V4,V4,V4 MFVSRD VS36,R10 CMPU R16,R10 SETB_CR0_NE(R3) RET
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 28 17:33:20 UTC 2023 - 6.7K bytes - Viewed (0) -
src/cmd/internal/obj/ppc64/a.out.go
ASTXVW4X ASTXVH8X ASTXVB16X ASTXVX ALXSDX ASTXSDX ALXSIWAX ALXSIWZX ASTXSIWX AMFVSRD AMFFPRD AMFVRD AMFVSRWZ AMFVSRLD AMTVSRD AMTFPRD AMTVRD AMTVSRWA AMTVSRWZ AMTVSRDD AMTVSRWS AXXLAND AXXLANDC AXXLEQV AXXLNAND AXXLOR AXXLORC AXXLNOR AXXLORQ AXXLXOR AXXSEL AXXMRGHW
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 01 18:50:29 UTC 2024 - 16K bytes - Viewed (0) -
test/codegen/math.go
func fnma(x, y, z float64) float64 { // riscv64:"FNMADDD",-"FNMSUBD" return math.FMA(x, -y, -z) } func fromFloat64(f64 float64) uint64 { // amd64:"MOVQ\tX.*, [^X].*" // arm64:"FMOVD\tF.*, R.*" // ppc64x:"MFVSRD" // mips64/hardfloat:"MOVV\tF.*, R.*" return math.Float64bits(f64+1) + 1 } func fromFloat32(f32 float32) uint32 { // amd64:"MOVL\tX.*, [^X].*" // arm64:"FMOVS\tF.*, R.*"
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 04 15:24:29 UTC 2024 - 6.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/PPC64.rules
// Lowering float <=> int (Cvt32to(32|64)F x) => ((FCFIDS|FCFID) (MTVSRD (SignExt32to64 x))) (Cvt64to(32|64)F x) => ((FCFIDS|FCFID) (MTVSRD x)) (Cvt32Fto(32|64) x) => (MFVSRD (FCTI(W|D)Z x)) (Cvt64Fto(32|64) x) => (MFVSRD (FCTI(W|D)Z x)) (Cvt32Fto64F ...) => (Copy ...) // Note v will have the wrong type for patterns dependent on Float32/Float64 (Cvt64Fto32F ...) => (FRSP ...) (CvtBoolToUint8 ...) => (Copy ...)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 53.2K bytes - Viewed (0) -
src/hash/crc32/crc32_ppc64le.s
VSLDOI $8,V0,zeroes,V0 #else VAND V0,mask_32bit,V1 VPMSUMD V1,const1,V1 VAND V1,mask_32bit,V1 VPMSUMD V1,const2,V1 VXOR V0,V1,V0 VSLDOI $4,V0,zeroes,V0 #endif MFVSRD VS32,R3 // VS32 = V0 NOR R3,R3,R3 // return ^crc MOVW R3,ret+32(FP) RET first_warm_up_done: LVX (R3),const1 ADD $16,R3 VPMSUMD V16,const1,V8 VPMSUMD V17,const1,V9
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 06 12:09:50 UTC 2024 - 13.1K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
// There are optimizations that should apply -- (Xi2f64 (MOVWload (not-ADD-ptr+offset) ) ) could use // the word-load instructions. (Xi2f64 (MOVDload ptr )) can be (FMOVDload ptr) {name: "MFVSRD", argLength: 1, reg: fpgp, asm: "MFVSRD", typ: "Int64"}, // move 64 bits of F register into G register {name: "MTVSRD", argLength: 1, reg: gpfp, asm: "MTVSRD", typ: "Float64"}, // move 64 bits of G register into F register
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 19:59:38 UTC 2024 - 43.8K bytes - Viewed (0)