-rw-r--r--  llvm/lib/Target/X86/X86MCInstLower.cpp     | 12
-rw-r--r--  llvm/test/CodeGen/X86/vector-mul.ll        | 16
-rw-r--r--  llvm/test/CodeGen/X86/vector-trunc-math.ll |  4
3 files changed, 22 insertions, 10 deletions
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index ec85da1..b6e3099 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -1928,6 +1928,17 @@ static void addConstantComments(const MachineInstr *MI,
 #define INSTR_CASE(Prefix, Instr, Suffix, Postfix) \
   case X86::Prefix##Instr##Suffix##rm##Postfix:
 
+#define CASE_AVX512_ARITH_RM(Instr) \
+  INSTR_CASE(V, Instr, Z128, ) \
+  INSTR_CASE(V, Instr, Z128, k) \
+  INSTR_CASE(V, Instr, Z128, kz) \
+  INSTR_CASE(V, Instr, Z256, ) \
+  INSTR_CASE(V, Instr, Z256, k) \
+  INSTR_CASE(V, Instr, Z256, kz) \
+  INSTR_CASE(V, Instr, Z, ) \
+  INSTR_CASE(V, Instr, Z, k) \
+  INSTR_CASE(V, Instr, Z, kz)
+
 #define CASE_ARITH_RM(Instr) \
   INSTR_CASE(, Instr, , ) /* SSE */ \
   INSTR_CASE(V, Instr, , ) /* AVX-128 */ \
@@ -1960,6 +1971,7 @@ static void addConstantComments(const MachineInstr *MI,
 
     CASE_ARITH_RM(PMADDWD)
     CASE_ARITH_RM(PMULLD)
+    CASE_AVX512_ARITH_RM(PMULLQ)
    CASE_ARITH_RM(PMULLW)
    CASE_ARITH_RM(PMULHW)
    CASE_ARITH_RM(PMULHUW)
diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll
index 497d2f6..7085f834 100644
--- a/llvm/test/CodeGen/X86/vector-mul.ll
+++ b/llvm/test/CodeGen/X86/vector-mul.ll
@@ -989,7 +989,7 @@ define <2 x i64> @mul_v2i64_17_65(<2 x i64> %a0) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v2i64_17_65:
 ; X64-AVX512DQ:       # %bb.0:
-; X64-AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [17,65]
 ; X64-AVX512DQ-NEXT:    retq
   %1 = mul <2 x i64> %a0, <i64 17, i64 65>
   ret <2 x i64> %1
@@ -1384,7 +1384,7 @@ define <2 x i64> @mul_v2i64_15_63(<2 x i64> %a0) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v2i64_15_63:
 ; X64-AVX512DQ:       # %bb.0:
-; X64-AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [15,63]
 ; X64-AVX512DQ-NEXT:    retq
   %1 = mul <2 x i64> %a0, <i64 15, i64 63>
   ret <2 x i64> %1
@@ -1473,7 +1473,7 @@ define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v2i64_neg_15_63:
 ; X64-AVX512DQ:       # %bb.0:
-; X64-AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [18446744073709551601,18446744073709551553]
 ; X64-AVX512DQ-NEXT:    retq
   %1 = mul <2 x i64> %a0, <i64 -15, i64 -63>
   ret <2 x i64> %1
@@ -1562,7 +1562,7 @@ define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v2i64_neg_17_65:
 ; X64-AVX512DQ:       # %bb.0:
-; X64-AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [18446744073709551599,18446744073709551551]
 ; X64-AVX512DQ-NEXT:    retq
   %1 = mul <2 x i64> %a0, <i64 -17, i64 -65>
   ret <2 x i64> %1
@@ -1674,7 +1674,7 @@ define <2 x i64> @mul_v2i64_neg_0_1(<2 x i64> %a0) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v2i64_neg_0_1:
 ; X64-AVX512DQ:       # %bb.0:
-; X64-AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255]
 ; X64-AVX512DQ-NEXT:    retq
   %1 = mul <2 x i64> %a0, <i64 0, i64 -1>
   ret <2 x i64> %1
@@ -1763,7 +1763,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v2i64_15_neg_63:
 ; X64-AVX512DQ:       # %bb.0:
-; X64-AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [15,18446744073709551553]
 ; X64-AVX512DQ-NEXT:    retq
   %1 = mul <2 x i64> %a0, <i64 15, i64 -63>
   ret <2 x i64> %1
@@ -1947,7 +1947,7 @@ define <2 x i64> @mul_v2i64_68_132(<2 x i64> %x) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v2i64_68_132:
 ; X64-AVX512DQ:       # %bb.0:
-; X64-AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [68,132]
 ; X64-AVX512DQ-NEXT:    retq
   %mul = mul <2 x i64> %x, <i64 68, i64 132>
   ret <2 x i64> %mul
@@ -2009,7 +2009,7 @@ define <2 x i64> @mul_v2i64_60_120(<2 x i64> %x) nounwind {
 ;
 ; X64-AVX512DQ-LABEL: mul_v2i64_60_120:
 ; X64-AVX512DQ:       # %bb.0:
-; X64-AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [60,124]
 ; X64-AVX512DQ-NEXT:    retq
   %mul = mul <2 x i64> %x, <i64 60, i64 124>
   ret <2 x i64> %mul
diff --git a/llvm/test/CodeGen/X86/vector-trunc-math.ll b/llvm/test/CodeGen/X86/vector-trunc-math.ll
index e72dbca..73d84b1 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-math.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-math.ll
@@ -2355,8 +2355,8 @@ define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
 ;
 ; AVX512DQ-LABEL: trunc_mul_const_v16i64_v16i8:
 ; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
-; AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
+; AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [0,1,2,3,4,5,6,7]
+; AVX512DQ-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 # [8,9,10,11,12,13,14,15]
 ; AVX512DQ-NEXT:    vpmovqb %zmm1, %xmm1
 ; AVX512DQ-NEXT:    vpmovqb %zmm0, %xmm0
 ; AVX512DQ-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
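
For context, a minimal standalone reproducer (hypothetical, not part of the commit; it simply mirrors the mul_v2i64_17_65 test above): assuming llc is invoked with a feature string such as -mtriple=x86_64-- -mattr=+avx512dq,+avx512vl, the multiply below lowers to a vpmullq with a constant-pool memory operand, which is the case the new CASE_AVX512_ARITH_RM(PMULLQ) entry annotates with its constant.

; Hypothetical reproducer: with AVX512DQ+VL enabled, this multiply should emit
; vpmullq {{\.?LCPI...}}(%rip), %xmm0, %xmm0 annotated with "# [17,65]".
define <2 x i64> @mul_v2i64_17_65_repro(<2 x i64> %a0) nounwind {
  %r = mul <2 x i64> %a0, <i64 17, i64 65>
  ret <2 x i64> %r
}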
