author     H.J. Lu <hjl.tools@gmail.com>   2018-01-08 04:36:59 -0800
committer  H.J. Lu <hjl.tools@gmail.com>   2018-01-08 04:37:20 -0800
commit     704a705d7aaab8041df76e2981e2a1efc014aad0 (patch)
tree       8792c8a27852863446dabab83c4fc53e558a24a4 /opcodes/i386-opc.tbl
parent     605fd3c6590fbed834107a2e1d1df0ba58834576 (diff)
x86: Properly encode vmovd with 64-bit memory operand
For historical reasons, we allow movd/vmovd with 64-bit register and
memory operands.  But for vmovd, we failed to handle a 64-bit memory
operand.  This has gone unnoticed since AT&T syntax always treats the
memory operand as 32-bit.

This patch properly encodes vmovd with 64-bit memory operands.  It also
removes the AVX512 vmovd templates with 64-bit operands: since GCC has

	case TYPE_SSEMOV:
	  switch (get_attr_mode (insn))
	    {
	    case MODE_DI:
	      /* Handle broken assemblers that require movd instead
		 of movq.  */
	      if (!HAVE_AS_IX86_INTERUNIT_MOVQ
		  && (GENERAL_REG_P (operands[0])
		      || GENERAL_REG_P (operands[1])))
		return "%vmovd\t{%1, %0|%0, %1}";
	      return "%vmovq\t{%1, %0|%0, %1}";

and all GNU assemblers that support AVX512 set
HAVE_AS_IX86_INTERUNIT_MOVQ, GCC won't generate AVX512 vmovd with a
64-bit operand.

gas/

	PR gas/22681
	* testsuite/gas/i386/i386.exp: Run x86-64-movd and
	x86-64-movd-intel.
	* testsuite/gas/i386/x86-64-movd-intel.d: New file.
	* testsuite/gas/i386/x86-64-movd.d: Likewise.
	* testsuite/gas/i386/x86-64-movd.s: Likewise.

opcodes/

	PR gas/22681
	* i386-opc.tbl: Properly encode vmovd with a Qword memory
	operand.  Remove AVX512 vmovd with 64-bit operands.
	* i386-tbl.h: Regenerated.
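As a rough illustration (not the contents of the new x86-64-movd tests; the
file name below is hypothetical), input like the following exercises the
case this patch addresses: the Intel-syntax qword forms should now be
encoded as 64-bit (VEX.W1) operations rather than as their 32-bit
counterparts.

	# vmovd-example.s (illustrative only) -- assemble with: as --64 vmovd-example.s
	.text
	# 64-bit GPR operands have long been accepted for compatibility:
	vmovd	%rax, %xmm0
	vmovd	%xmm0, %rax
	# Intel syntax can spell out a 64-bit memory operand explicitly;
	# this patch makes gas pick the 64-bit encoding for it:
	.intel_syntax noprefix
	vmovd	xmm0, qword ptr [rax]
	vmovd	qword ptr [rax], xmm0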
Diffstat (limited to 'opcodes/i386-opc.tbl')
-rw-r--r--  opcodes/i386-opc.tbl  6
1 file changed, 2 insertions, 4 deletions
diff --git a/opcodes/i386-opc.tbl b/opcodes/i386-opc.tbl
index a9cb428..464add0 100644
--- a/opcodes/i386-opc.tbl
+++ b/opcodes/i386-opc.tbl
@@ -2057,9 +2057,9 @@ vmovaps, 2, 0x29, None, 1, CpuAVX, Modrm|Vex|VexOpcode=0|VexW=1|CheckRegSize|No_
// support assembler for AMD64, we accept 64bit operand on vmovd so
// that we can use one template for both SSE and AVX instructions.
vmovd, 2, 0x666e, None, 1, CpuAVX, Modrm|Vex=3|VexOpcode=0|IgnoreSize|No_bSuf|No_wSuf|No_lSuf|No_sSuf|No_qSuf|No_ldSuf, { Reg32|Dword|Unspecified|BaseIndex, RegXMM }
-vmovd, 2, 0x666e, None, 1, CpuAVX|Cpu64, Modrm|Vex=3|VexOpcode=0|IgnoreSize|No_bSuf|No_wSuf|No_lSuf|No_sSuf|No_qSuf|No_ldSuf|Rex64, { Reg64|Qword|BaseIndex, RegXMM }
+vmovd, 2, 0x666e, None, 1, CpuAVX|Cpu64, Modrm|Vex=3|VexOpcode=0|IgnoreSize|No_bSuf|No_wSuf|No_lSuf|No_sSuf|No_qSuf|No_ldSuf|Size64, { Reg64|Qword|BaseIndex, RegXMM }
vmovd, 2, 0x667e, None, 1, CpuAVX, Modrm|Vex=3|VexOpcode=0|IgnoreSize|No_bSuf|No_wSuf|No_lSuf|No_sSuf|No_qSuf|No_ldSuf, { RegXMM, Dword|Unspecified|Reg32|BaseIndex }
-vmovd, 2, 0x667e, None, 1, CpuAVX|Cpu64, Modrm|Vex=3|VexOpcode=0|IgnoreSize|No_bSuf|No_wSuf|No_lSuf|No_sSuf|No_qSuf|No_ldSuf|Rex64, { RegXMM, Qword|Reg64|BaseIndex }
+vmovd, 2, 0x667e, None, 1, CpuAVX|Cpu64, Modrm|Vex=3|VexOpcode=0|IgnoreSize|No_bSuf|No_wSuf|No_lSuf|No_sSuf|No_qSuf|No_ldSuf|Size64, { RegXMM, Reg64|Qword|BaseIndex }
vmovddup, 2, 0xf212, None, 1, CpuAVX, Modrm|Vex|VexOpcode=0|VexW=1|IgnoreSize|No_bSuf|No_wSuf|No_lSuf|No_sSuf|No_qSuf|No_ldSuf, { Qword|Unspecified|BaseIndex|RegXMM, RegXMM }
vmovddup, 2, 0xf212, None, 1, CpuAVX, Modrm|Vex=2|VexOpcode=0|VexW=1|IgnoreSize|No_bSuf|No_wSuf|No_lSuf|No_sSuf|No_qSuf|No_ldSuf, { Ymmword|Unspecified|BaseIndex|RegYMM, RegYMM }
vmovdqa, 2, 0x666f, None, 1, CpuAVX, Load|Modrm|Vex|VexOpcode=0|VexW=1|CheckRegSize|No_bSuf|No_wSuf|No_lSuf|No_sSuf|No_qSuf|No_ldSuf, { Unspecified|BaseIndex|RegXMM|RegYMM, RegXMM|RegYMM }
@@ -3873,9 +3873,7 @@ vmovups, 2, 0x10, None, 1, CpuAVX512F, Modrm|EVex=1|Masking=3|VexOpcode=0|VexW=1
vmovups, 2, 0x11, None, 1, CpuAVX512F, Modrm|EVex=1|Masking=3|VexOpcode=0|VexW=1|IgnoreSize|No_bSuf|No_wSuf|No_lSuf|No_sSuf|No_qSuf|No_ldSuf, { RegZMM, RegZMM|RegMem }
vmovd, 2, 0x666E, None, 1, CpuAVX512F, Modrm|EVex=4|VexOpcode=0|VexW=1|Disp8MemShift=2|IgnoreSize|No_bSuf|No_wSuf|No_lSuf|No_sSuf|No_qSuf|No_ldSuf, { Reg32|Dword|Unspecified|BaseIndex, RegXMM }
-vmovd, 2, 0x666E, None, 1, CpuAVX512F|Cpu64, Modrm|EVex=4|VexOpcode=0|VexW=1|Disp8MemShift=2|IgnoreSize|No_bSuf|No_wSuf|No_lSuf|No_sSuf|No_qSuf|No_ldSuf|Rex64, { Reg64|Qword|Unspecified|BaseIndex, RegXMM }
vmovd, 2, 0x667E, None, 1, CpuAVX512F, Modrm|EVex=4|VexOpcode=0|VexW=1|Disp8MemShift=2|IgnoreSize|No_bSuf|No_wSuf|No_lSuf|No_sSuf|No_qSuf|No_ldSuf, { RegXMM, Reg32|Dword|Unspecified|BaseIndex }
-vmovd, 2, 0x667E, None, 1, CpuAVX512F|Cpu64, Modrm|EVex=4|VexOpcode=0|VexW=1|Disp8MemShift=2|IgnoreSize|No_bSuf|No_wSuf|No_lSuf|No_sSuf|No_qSuf|No_ldSuf|Rex64, { RegXMM, Reg64|Qword|Unspecified|BaseIndex }
vmovddup, 2, 0xF212, None, 1, CpuAVX512F, Modrm|EVex=1|Masking=3|VexOpcode=0|VexW=2|VecESize=1|Disp8MemShift=6|IgnoreSize|No_bSuf|No_wSuf|No_lSuf|No_sSuf|No_qSuf|No_ldSuf, { RegZMM|ZMMword|Unspecified|BaseIndex, RegZMM }