Diffstat (limited to 'gas/config/tc-i386.c')
-rw-r--r--  gas/config/tc-i386.c  3205
 1 file changed, 1460 insertions(+), 1745 deletions(-)
diff --git a/gas/config/tc-i386.c b/gas/config/tc-i386.c
index 2e19431..1439708 100644
--- a/gas/config/tc-i386.c
+++ b/gas/config/tc-i386.c
@@ -138,6 +138,13 @@ typedef struct
}
arch_entry;
+/* Modes for parse_insn() to operate in. */
+enum parse_mode {
+ parse_all,
+ parse_prefix,
+ parse_pseudo_prefix,
+};
+
static void update_code_flag (int, int);
static void s_insn (int);
static void s_noopt (int);
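A minimal sketch (hypothetical caller, not part of this patch) of how the new enum argument replaces the old bool at parse_insn() call sites — the former flag could only distinguish two behaviours, while the enum adds a prefix-only scan:

static const char *
scan_line (const char *line, char *mnem, bool pseudo_only)
{
  /* parse_all consumes a whole mnemonic line; the two prefix modes
     stop after (pseudo-)prefixes.  */
  return parse_insn (line, mnem,
		     pseudo_only ? parse_pseudo_prefix : parse_prefix);
}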
@@ -163,8 +170,9 @@ static int i386_intel_operand (char *, int);
static int i386_intel_simplify (expressionS *);
static int i386_intel_parse_name (const char *, expressionS *);
static const reg_entry *parse_register (const char *, char **);
-static const char *parse_insn (const char *, char *, bool);
+static const char *parse_insn (const char *, char *, enum parse_mode);
static char *parse_operands (char *, const char *);
+static void copy_operand (unsigned int, unsigned int);
static void swap_operands (void);
static void swap_2_operands (unsigned int, unsigned int);
static enum i386_flag_code i386_addressing_mode (void);
@@ -186,7 +194,7 @@ static void output_disp (fragS *, offsetT);
#ifdef OBJ_AOUT
static void s_bss (int);
#endif
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
static void handle_large_common (int small ATTRIBUTE_UNUSED);
/* GNU_PROPERTY_X86_ISA_1_USED. */
@@ -267,6 +275,32 @@ enum i386_error
internal_error,
};
+#ifdef OBJ_ELF
+enum x86_tls_error_type
+{
+ x86_tls_error_continue,
+ x86_tls_error_none,
+ x86_tls_error_insn,
+ x86_tls_error_opcode,
+ x86_tls_error_sib,
+ x86_tls_error_no_base_reg,
+ x86_tls_error_require_no_base_index_reg,
+ x86_tls_error_base_reg,
+ x86_tls_error_index_ebx,
+ x86_tls_error_eax,
+ x86_tls_error_RegA,
+ x86_tls_error_ebx,
+ x86_tls_error_rip,
+ x86_tls_error_dest_eax,
+ x86_tls_error_dest_rdi,
+ x86_tls_error_scale_factor,
+ x86_tls_error_base_reg_size,
+ x86_tls_error_dest_32bit_reg_size,
+ x86_tls_error_dest_64bit_reg_size,
+ x86_tls_error_dest_32bit_or_64bit_reg_size
+};
+#endif
+
struct _i386_insn
{
/* TM holds the template for the insn we're currently assembling. */
@@ -358,6 +392,9 @@ struct _i386_insn
/* Has GOTPC or TLS relocation. */
bool has_gotpc_tls_reloc;
+ /* Has relocation entry from the gotrel array. */
+ bool has_gotrel;
+
/* RM and SIB are the modrm byte and the sib byte where the
addressing modes of this insn are encoded. */
modrm_byte rm;
@@ -426,51 +463,9 @@ struct _i386_insn
#define OSZC_OF 8
unsigned int oszc_flags;
- /* Prefer load or store in encoding. */
- enum
- {
- dir_encoding_default = 0,
- dir_encoding_load,
- dir_encoding_store,
- dir_encoding_swap
- } dir_encoding;
-
- /* Prefer 8bit, 16bit, 32bit displacement in encoding. */
- enum
- {
- disp_encoding_default = 0,
- disp_encoding_8bit,
- disp_encoding_16bit,
- disp_encoding_32bit
- } disp_encoding;
-
- /* Prefer the REX byte in encoding. */
- bool rex_encoding;
-
- /* Prefer the REX2 prefix in encoding. */
- bool rex2_encoding;
-
- /* No CSPAZO flags update. */
- bool has_nf;
-
- /* Disable instruction size optimization. */
- bool no_optimize;
-
/* Invert the condition encoded in a base opcode. */
bool invert_cond;
- /* How to encode instructions. */
- enum
- {
- encoding_default = 0,
- encoding_vex,
- encoding_vex3,
- encoding_egpr, /* REX2 or EVEX. */
- encoding_evex,
- encoding_evex512,
- encoding_error
- } encoding;
-
/* REP prefix. */
const char *rep_prefix;
@@ -489,6 +484,48 @@ struct _i386_insn
typedef struct _i386_insn i386_insn;
+/* Pseudo-prefix recording state, separate from i386_insn. */
+static struct pseudo_prefixes {
+ /* How to encode instructions. */
+ enum {
+ encoding_default = 0,
+ encoding_vex,
+ encoding_vex3,
+ encoding_egpr, /* REX2 or EVEX. */
+ encoding_evex,
+ encoding_evex512,
+ encoding_error
+ } encoding;
+
+ /* Prefer load or store in encoding. */
+ enum {
+ dir_encoding_default = 0,
+ dir_encoding_load,
+ dir_encoding_store,
+ dir_encoding_swap
+ } dir_encoding;
+
+ /* Prefer 8bit, 16bit, 32bit displacement in encoding. */
+ enum {
+ disp_encoding_default = 0,
+ disp_encoding_8bit,
+ disp_encoding_16bit,
+ disp_encoding_32bit
+ } disp_encoding;
+
+ /* Prefer the REX byte in encoding. */
+ bool rex_encoding;
+
+ /* Prefer the REX2 prefix in encoding. */
+ bool rex2_encoding;
+
+ /* No CSPAZO flags update. */
+ bool has_nf;
+
+ /* Disable instruction size optimization. */
+ bool no_optimize;
+} pp;
+
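As a rough map (assumed from the field comments above, not exhaustive) of which source-level pseudo-prefixes feed which pp fields, with the kind of reset helper this file-scope state needs between instructions (cf. the memset added to md_begin() further down):

/* Assumed mapping from pseudo-prefixes to pp fields:
     {vex} {vex3} {evex}        -> pp.encoding
     {load} {store}             -> pp.dir_encoding
     {disp8} {disp16} {disp32}  -> pp.disp_encoding
     {rex} / {rex2}             -> pp.rex_encoding / pp.rex2_encoding
     {nf}                       -> pp.has_nf
     {nooptimize}               -> pp.no_optimize
   pp is no longer part of i386_insn, so clearing i does not clear it.  */
static void
reset_pp (void)
{
  memset (&pp, 0, sizeof (pp));  /* all fields back to *_default */
}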
/* Link RC type with corresponding string, that'll be looked for in
asm. */
struct RC_name
@@ -519,7 +556,7 @@ static const unsigned char i386_seg_prefixes[] = {
/* List of chars besides those in app.c:symbol_chars that can start an
operand. Used to prevent the scrubber eating vital white-space. */
-const char extra_symbol_chars[] = "*%-([{}"
+const char extra_symbol_chars[] = "*%-(["
#ifdef LEX_AT
"@"
#endif
@@ -528,7 +565,7 @@ const char extra_symbol_chars[] = "*%-([{}"
#endif
;
-#if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
+#if (defined (OBJ_ELF) \
&& !defined (TE_GNU) \
&& !defined (TE_LINUX) \
&& !defined (TE_Haiku) \
@@ -580,7 +617,7 @@ static char operand_chars[256];
/* All non-digit non-letter characters that may occur in an operand and
which aren't already in extra_symbol_chars[]. */
-static const char operand_special_chars[] = "$+,)._~/<>|&^!=:@]";
+static const char operand_special_chars[] = "$+,)._~/<>|&^!=:@]{}";
/* md_assemble() always leaves the strings it's passed unaltered. To
effect this we maintain a stack of saved characters that we've smashed
@@ -617,7 +654,7 @@ static int use_rela_relocations = 0;
/* __tls_get_addr/___tls_get_addr symbol for TLS. */
static const char *tls_get_addr;
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
/* The ELF ABI to use. */
enum x86_elf_abi
@@ -635,15 +672,15 @@ static enum x86_elf_abi x86_elf_abi = I386_ABI;
static int use_big_obj = 0;
#endif
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
/* 1 if generating code for a shared library. */
static int shared = 0;
unsigned int x86_sframe_cfa_sp_reg;
/* The other CFA base register for SFrame stack trace info. */
unsigned int x86_sframe_cfa_fp_reg;
-unsigned int x86_sframe_cfa_ra_reg;
+static ginsnS *x86_ginsn_new (const symbolS *, enum ginsn_gen_mode);
#endif
/* 1 for intel syntax,
@@ -710,6 +747,9 @@ lfence_before_ret;
static int generate_relax_relocations
= DEFAULT_GENERATE_X86_RELAX_RELOCATIONS;
+/* 1 if the assembler should check tls relocation. */
+static bool tls_check = DEFAULT_X86_TLS_CHECK;
+
static enum check_kind
{
check_none = 0,
@@ -1179,6 +1219,8 @@ static const arch_entry cpu_arch[] =
VECARCH (avx10.1, AVX10_1, ANY_AVX512F, set),
SUBARCH (user_msr, USER_MSR, USER_MSR, false),
SUBARCH (apx_f, APX_F, APX_F, false),
+ VECARCH (avx10.2, AVX10_2, ANY_AVX10_2, set),
+ SUBARCH (gmi, GMI, GMI, false),
};
#undef SUBARCH
@@ -1264,7 +1306,7 @@ const pseudo_typeS md_pseudo_table[] =
{"disallow_index_reg", set_allow_index_reg, 0},
{"sse_check", set_check, 0},
{"operand_check", set_check, 1},
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
{"largecomm", handle_large_common, 0},
#else
{"file", dwarf2_directive_file, 0},
@@ -1286,6 +1328,96 @@ static htab_t op_hash;
/* Hash table for register lookup. */
static htab_t reg_hash;
+
+#if (defined (OBJ_ELF) || defined (OBJ_MACH_O) || defined (TE_PE))
+static const struct
+{
+ const char *str;
+ unsigned int len;
+ const enum bfd_reloc_code_real rel[2];
+ const i386_operand_type types64;
+ bool need_GOT_symbol;
+}
+gotrel[] =
+{
+#define OPERAND_TYPE_IMM32_32S_DISP32 { .bitfield = \
+ { .imm32 = 1, .imm32s = 1, .disp32 = 1 } }
+#define OPERAND_TYPE_IMM32_32S_64_DISP32 { .bitfield = \
+ { .imm32 = 1, .imm32s = 1, .imm64 = 1, .disp32 = 1 } }
+#define OPERAND_TYPE_IMM32_32S_64_DISP32_64 { .bitfield = \
+ { .imm32 = 1, .imm32s = 1, .imm64 = 1, .disp32 = 1, .disp64 = 1 } }
+#define OPERAND_TYPE_IMM64_DISP64 { .bitfield = \
+ { .imm64 = 1, .disp64 = 1 } }
+
+#ifndef TE_PE
+#ifdef OBJ_ELF
+ { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32,
+ BFD_RELOC_SIZE32 },
+ { .bitfield = { .imm32 = 1, .imm64 = 1 } }, false },
+#endif
+ { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
+ BFD_RELOC_X86_64_PLTOFF64 },
+ { .bitfield = { .imm64 = 1 } }, true },
+ { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
+ BFD_RELOC_X86_64_PLT32 },
+ OPERAND_TYPE_IMM32_32S_DISP32, false },
+ { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
+ BFD_RELOC_X86_64_GOTPLT64 },
+ OPERAND_TYPE_IMM64_DISP64, true },
+ { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
+ BFD_RELOC_X86_64_GOTOFF64 },
+ OPERAND_TYPE_IMM64_DISP64, true },
+ { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
+ BFD_RELOC_X86_64_GOTPCREL },
+ OPERAND_TYPE_IMM32_32S_DISP32, true },
+ { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
+ BFD_RELOC_X86_64_TLSGD },
+ OPERAND_TYPE_IMM32_32S_DISP32, true },
+ { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
+ _dummy_first_bfd_reloc_code_real },
+ OPERAND_TYPE_NONE, true },
+ { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
+ BFD_RELOC_X86_64_TLSLD },
+ OPERAND_TYPE_IMM32_32S_DISP32, true },
+ { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
+ BFD_RELOC_X86_64_GOTTPOFF },
+ OPERAND_TYPE_IMM32_32S_DISP32, true },
+ { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
+ BFD_RELOC_X86_64_TPOFF32 },
+ OPERAND_TYPE_IMM32_32S_64_DISP32_64, true },
+ { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
+ _dummy_first_bfd_reloc_code_real },
+ OPERAND_TYPE_NONE, true },
+ { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
+ BFD_RELOC_X86_64_DTPOFF32 },
+ OPERAND_TYPE_IMM32_32S_64_DISP32_64, true },
+ { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
+ _dummy_first_bfd_reloc_code_real },
+ OPERAND_TYPE_NONE, true },
+ { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
+ _dummy_first_bfd_reloc_code_real },
+ OPERAND_TYPE_NONE, true },
+ { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
+ BFD_RELOC_X86_64_GOT32 },
+ OPERAND_TYPE_IMM32_32S_64_DISP32, true },
+ { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
+ BFD_RELOC_X86_64_GOTPC32_TLSDESC },
+ OPERAND_TYPE_IMM32_32S_DISP32, true },
+ { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
+ BFD_RELOC_X86_64_TLSDESC_CALL },
+ OPERAND_TYPE_IMM32_32S_DISP32, true },
+#else /* TE_PE */
+ { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
+ BFD_RELOC_32_SECREL },
+ OPERAND_TYPE_IMM32_32S_64_DISP32_64, false },
+#endif
+
+#undef OPERAND_TYPE_IMM32_32S_DISP32
+#undef OPERAND_TYPE_IMM32_32S_64_DISP32
+#undef OPERAND_TYPE_IMM32_32S_64_DISP32_64
+#undef OPERAND_TYPE_IMM64_DISP64
+};
+#endif
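A simplified sketch (assumed shape; the real parser also validates context and the need_GOT_symbol flag) of how an @SUFFIX is matched against this table, picking rel[1] in 64-bit object mode and rel[0] otherwise:

static enum bfd_reloc_code_real
lookup_reloc_suffix (const char *str, bool is64)
{
  unsigned int j;

  /* str points just past the '@'.  */
  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    if (strncasecmp (str, gotrel[j].str, gotrel[j].len) == 0)
      return gotrel[j].rel[is64];
  return NO_RELOC;
}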
/* Various efficient no-op patterns for aligning code labels.
Note: Don't try to assemble the instructions in the comments.
@@ -1930,10 +2062,10 @@ static const i386_cpu_flags avx512 = CPU_ANY_AVX512F_FLAGS;
static INLINE bool need_evex_encoding (const insn_template *t)
{
- return i.encoding == encoding_evex
- || i.encoding == encoding_evex512
- || i.has_nf
- || (t->opcode_modifier.vex && i.encoding == encoding_egpr)
+ return pp.encoding == encoding_evex
+ || pp.encoding == encoding_evex512
+ || pp.has_nf
+ || (t->opcode_modifier.vex && pp.encoding == encoding_egpr)
|| i.mask.reg;
}
@@ -2629,8 +2761,8 @@ static INLINE int
fits_in_imm4 (offsetT num)
{
/* Despite the name, check for imm3 if we're dealing with EVEX. */
- return (num & (i.encoding != encoding_evex
- && i.encoding != encoding_egpr ? 0xf : 7)) == num;
+ return (num & (pp.encoding != encoding_evex
+ && pp.encoding != encoding_egpr ? 0xf : 7)) == num;
}
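A standalone restatement of the check above, runnable on its own (values illustrative): under VEX the register-in-immediate field has 4 bits, while EVEX and eGPR forms leave only 3, hence the 0xf vs 7 mask.

#include <stdbool.h>
#include <stdio.h>

static bool fits (long num, bool evex_or_egpr)
{
  return (num & (evex_or_egpr ? 7 : 0xf)) == num;
}

int main (void)
{
  printf ("%d %d\n", fits (0xb, false), fits (0xb, true)); /* 1 0 */
  return 0;
}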
static i386_operand_type
@@ -2946,12 +3078,9 @@ static void
check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
{
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
- static const char *arch;
-
/* Intel MCU is only supported on ELF. */
- if (!IS_ELF)
- return;
+#ifdef OBJ_ELF
+ static const char *arch;
if (!arch)
{
@@ -3073,9 +3202,16 @@ set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
const arch_stack_entry *top = arch_stack_top;
if (!top)
- as_bad (_(".arch stack is empty"));
- else if (top->flag_code != flag_code
- || top->stackop_size != stackop_size)
+ {
+ as_bad (_(".arch stack is empty"));
+ restore_bad:
+ (void) restore_line_pointer (e);
+ ignore_rest_of_line ();
+ return;
+ }
+
+ if (top->flag_code != flag_code
+ || top->stackop_size != stackop_size)
{
static const unsigned int bits[] = {
[CODE_16BIT] = 16,
@@ -3086,22 +3222,21 @@ set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
as_bad (_("this `.arch pop' requires `.code%u%s' to be in effect"),
bits[top->flag_code],
top->stackop_size == LONG_MNEM_SUFFIX ? "gcc" : "");
+ goto restore_bad;
}
- else
- {
- arch_stack_top = top->prev;
- cpu_arch_name = top->name;
- free (cpu_sub_arch_name);
- cpu_sub_arch_name = top->sub_name;
- cpu_arch_flags = top->flags;
- cpu_arch_isa = top->isa;
- cpu_arch_isa_flags = top->isa_flags;
- vector_size = top->vector_size;
- no_cond_jump_promotion = top->no_cond_jump_promotion;
+ arch_stack_top = top->prev;
- XDELETE (top);
- }
+ cpu_arch_name = top->name;
+ free (cpu_sub_arch_name);
+ cpu_sub_arch_name = top->sub_name;
+ cpu_arch_flags = top->flags;
+ cpu_arch_isa = top->isa;
+ cpu_arch_isa_flags = top->isa_flags;
+ vector_size = top->vector_size;
+ no_cond_jump_promotion = top->no_cond_jump_promotion;
+
+ XDELETE (top);
(void) restore_line_pointer (e);
demand_empty_rest_of_line ();
@@ -3144,18 +3279,14 @@ set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
{
as_bad (_("64bit mode not supported on `%s'."),
cpu_arch[j].name);
- (void) restore_line_pointer (e);
- ignore_rest_of_line ();
- return;
+ goto restore_bad;
}
if (flag_code == CODE_32BIT && !cpu_arch[j].enable.bitfield.cpui386)
{
as_bad (_("32bit mode not supported on `%s'."),
cpu_arch[j].name);
- (void) restore_line_pointer (e);
- ignore_rest_of_line ();
- return;
+ goto restore_bad;
}
cpu_arch_name = cpu_arch[j].name;
@@ -3235,12 +3366,13 @@ set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
}
if (j == ARRAY_SIZE (cpu_arch))
- as_bad (_("no such architecture: `%s'"), string);
-
- *input_line_pointer = e;
+ {
+ as_bad (_("no such architecture: `%s'"), string);
+ goto restore_bad;
+ }
no_cond_jump_promotion = 0;
- if (*input_line_pointer == ','
+ if (restore_line_pointer (e) == ','
&& !is_end_of_line[(unsigned char) input_line_pointer[1]])
{
++input_line_pointer;
@@ -3249,10 +3381,11 @@ set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
if (strcmp (string, "nojumps") == 0)
no_cond_jump_promotion = 1;
- else if (strcmp (string, "jumps") == 0)
- ;
- else
- as_bad (_("no such architecture modifier: `%s'"), string);
+ else if (strcmp (string, "jumps") != 0)
+ {
+ as_bad (_("no such architecture modifier: `%s'"), string);
+ goto restore_bad;
+ }
(void) restore_line_pointer (e);
}
@@ -3318,8 +3451,8 @@ op_lookup (const char *mnemonic)
void
md_begin (void)
{
- /* Support pseudo prefixes like {disp32}. */
- lex_type ['{'] = LEX_BEGIN_NAME;
+ /* Make sure possible padding space is clear. */
+ memset (&pp, 0, sizeof (pp));
/* Initialize op_hash hash table. */
op_hash = str_htab_create ();
@@ -3420,13 +3553,12 @@ md_begin (void)
if (object_64bit)
{
#if defined (OBJ_COFF) && defined (TE_PE)
- x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
- ? 32 : 16);
+ x86_dwarf2_return_column = 32;
#else
x86_dwarf2_return_column = 16;
#endif
x86_cie_data_alignment = -8;
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
x86_sframe_cfa_sp_reg = REG_SP;
x86_sframe_cfa_fp_reg = REG_FP;
#endif
@@ -3665,7 +3797,7 @@ reloc (unsigned int size,
break;
}
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
if (other == BFD_RELOC_SIZE32)
{
if (size == 8)
@@ -3736,7 +3868,7 @@ reloc (unsigned int size,
return NO_RELOC;
}
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
/* Here we decide which fixups can be adjusted to make them relative to
the beginning of the section instead of the symbol. Basically we need
to make sure that the dynamic relocations are done correctly, so in
@@ -3745,9 +3877,6 @@ reloc (unsigned int size,
int
tc_i386_fix_adjustable (fixS *fixP)
{
- if (!IS_ELF)
- return 1;
-
/* Don't adjust pc-relative references to merge sections in 64-bit
mode. */
if (use_rela_relocations
@@ -3793,12 +3922,18 @@ tc_i386_fix_adjustable (fixS *fixP)
|| fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
|| fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
|| fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
+ || fixP->fx_r_type == BFD_RELOC_X86_64_GOT64
|| fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
|| fixP->fx_r_type == BFD_RELOC_X86_64_CODE_4_GOTPC32_TLSDESC
|| fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
|| fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
|| fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
return 0;
+ /* Resolve PLT32 relocation against local symbol to section only for
+ PC-relative relocations. */
+ if (fixP->fx_r_type == BFD_RELOC_386_PLT32
+ || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32)
+ return fixP->fx_pcrel;
return 1;
}
#endif
@@ -3942,7 +4077,7 @@ build_vex_prefix (const insn_template *t)
{
unsigned int register_specifier;
unsigned int vector_length;
- unsigned int w;
+ bool w;
/* Check register specifier. */
if (i.vex.register_specifier)
@@ -3957,8 +4092,8 @@ build_vex_prefix (const insn_template *t)
/* Use 2-byte VEX prefix by swapping destination and source operand
if there are more than 1 register operand. */
if (i.reg_operands > 1
- && i.encoding != encoding_vex3
- && i.dir_encoding == dir_encoding_default
+ && pp.encoding != encoding_vex3
+ && pp.dir_encoding == dir_encoding_default
&& i.operands == i.reg_operands
&& operand_type_equal (&i.types[0], &i.types[i.operands - 1])
&& i.tm.opcode_space == SPACE_0F
@@ -3986,14 +4121,14 @@ build_vex_prefix (const insn_template *t)
/* Use 2-byte VEX prefix by swapping commutative source operands if there
are no memory operands and at least 3 register ones. */
if (i.reg_operands >= 3
- && i.encoding != encoding_vex3
+ && pp.encoding != encoding_vex3
&& i.reg_operands == i.operands - i.imm_operands
&& i.tm.opcode_modifier.vex
&& i.tm.opcode_modifier.commutative
/* .commutative aliases .staticrounding; disambiguate. */
&& !i.tm.opcode_modifier.sae
&& (i.tm.opcode_modifier.sse2avx
- || (optimize > 1 && !i.no_optimize))
+ || (optimize > 1 && !pp.no_optimize))
&& i.rex == REX_B
&& i.vex.register_specifier
&& !(i.vex.register_specifier->reg_flags & RegRex))
@@ -4041,26 +4176,26 @@ build_vex_prefix (const insn_template *t)
/* Check the REX.W bit and VEXW. */
if (i.tm.opcode_modifier.vexw == VEXWIG)
- w = (vexwig == vexw1 || (i.rex & REX_W)) ? 1 : 0;
+ w = vexwig == vexw1 || (i.rex & REX_W);
else if (i.tm.opcode_modifier.vexw && !(i.rex & REX_W))
- w = i.tm.opcode_modifier.vexw == VEXW1 ? 1 : 0;
+ w = i.tm.opcode_modifier.vexw == VEXW1;
else
- w = (flag_code == CODE_64BIT ? i.rex & REX_W : vexwig == vexw1) ? 1 : 0;
+ w = flag_code == CODE_64BIT ? i.rex & REX_W : vexwig == vexw1;
/* Use 2-byte VEX prefix if possible. */
if (w == 0
- && i.encoding != encoding_vex3
+ && pp.encoding != encoding_vex3
&& i.tm.opcode_space == SPACE_0F
&& (i.rex & (REX_W | REX_X | REX_B)) == 0)
{
/* 2-byte VEX prefix. */
- unsigned int r;
+ bool r;
i.vex.length = 2;
i.vex.bytes[0] = 0xc5;
/* Check the REX.R bit. */
- r = (i.rex & REX_R) ? 0 : 1;
+ r = !(i.rex & REX_R);
i.vex.bytes[1] = (r << 7
| register_specifier << 3
| vector_length << 2
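For reference, a standalone sketch of the C5 second-byte layout assembled above (register_specifier is assumed already ones-complemented, as in the surrounding code):

#include <stdio.h>

/* 2-byte VEX: 0xc5, then ~R | ~vvvv | L | pp.  */
static unsigned char
vex2_byte1 (int rex_r, unsigned reg_spec, unsigned vl, unsigned pfx)
{
  return (!rex_r << 7) | (reg_spec << 3) | (vl << 2) | pfx;
}

int main (void)
{
  /* No REX.R, vvvv unused (0b1111), 128-bit, 66 prefix -> 0xf9.  */
  printf ("0x%02x\n", vex2_byte1 (0, 0xf, 0, 1));
  return 0;
}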
@@ -4111,7 +4246,7 @@ is_any_vex_encoding (const insn_template *t)
static INLINE bool
is_apx_evex_encoding (void)
{
- return i.rex2 || i.tm.opcode_space == SPACE_EVEXMAP4 || i.has_nf
+ return i.rex2 || i.tm.opcode_space == SPACE_EVEXMAP4 || pp.has_nf
|| (i.vex.register_specifier
&& (i.vex.register_specifier->reg_flags & RegRex2));
}
@@ -4119,7 +4254,7 @@ is_apx_evex_encoding (void)
static INLINE bool
is_apx_rex2_encoding (void)
{
- return i.rex2 || i.rex2_encoding
+ return i.rex2 || pp.rex2_encoding
|| i.tm.opcode_modifier.rex2;
}
@@ -4198,7 +4333,8 @@ get_broadcast_bytes (const insn_template *t, bool diag)
static void
build_evex_prefix (void)
{
- unsigned int register_specifier, w;
+ unsigned int register_specifier;
+ bool w, u;
rex_byte vrex_used = 0;
/* Check register specifier. */
@@ -4265,16 +4401,68 @@ build_evex_prefix (void)
/* Check the REX.W bit and VEXW. */
if (i.tm.opcode_modifier.vexw == VEXWIG)
- w = (evexwig == evexw1 || (i.rex & REX_W)) ? 1 : 0;
+ w = evexwig == evexw1 || (i.rex & REX_W);
else if (i.tm.opcode_modifier.vexw && !(i.rex & REX_W))
- w = i.tm.opcode_modifier.vexw == VEXW1 ? 1 : 0;
+ w = i.tm.opcode_modifier.vexw == VEXW1;
else
- w = (flag_code == CODE_64BIT ? i.rex & REX_W : evexwig == evexw1) ? 1 : 0;
+ w = flag_code == CODE_64BIT ? i.rex & REX_W : evexwig == evexw1;
+
+ if (i.tm.opcode_modifier.evex == EVEXDYN)
+ {
+ unsigned int op;
+
+ /* Determine vector length from the last multi-length vector operand. */
+ for (op = i.operands; op--;)
+ if (i.tm.operand_types[op].bitfield.xmmword
+ + i.tm.operand_types[op].bitfield.ymmword
+ + i.tm.operand_types[op].bitfield.zmmword > 1)
+ {
+ if (i.types[op].bitfield.zmmword)
+ {
+ i.tm.opcode_modifier.evex = EVEX512;
+ break;
+ }
+ else if (i.types[op].bitfield.ymmword)
+ {
+ i.tm.opcode_modifier.evex = EVEX256;
+ break;
+ }
+ else if (i.types[op].bitfield.xmmword)
+ {
+ i.tm.opcode_modifier.evex = EVEX128;
+ break;
+ }
+ else if ((i.broadcast.type || i.broadcast.bytes)
+ && op == i.broadcast.operand)
+ {
+ switch (get_broadcast_bytes (&i.tm, true))
+ {
+ case 64:
+ i.tm.opcode_modifier.evex = EVEX512;
+ break;
+ case 32:
+ i.tm.opcode_modifier.evex = EVEX256;
+ break;
+ case 16:
+ i.tm.opcode_modifier.evex = EVEX128;
+ break;
+ default:
+ abort ();
+ }
+ break;
+ }
+ }
+
+ if (op >= MAX_OPERANDS)
+ abort ();
+ }
+
+ u = i.rounding.type == rc_none || i.tm.opcode_modifier.evex != EVEX256;
/* The third byte of the EVEX prefix. */
i.vex.bytes[2] = ((w << 7)
| (register_specifier << 3)
- | 4 /* Encode the U bit. */
+ | (u << 2)
| i.tm.opcode_modifier.opcodeprefix);
/* The fourth byte of the EVEX prefix. */
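The third EVEX byte above now derives U instead of hard-coding it: U drops to 0 only when rounding control is used at 256-bit length (presumably for the new 256-bit rounding forms). A standalone restatement, with the same operand meanings as in the code:

#include <stdbool.h>
#include <stdio.h>

/* Byte 2 of the 4-byte EVEX prefix: W | ~vvvv | U | pp.  */
static unsigned char
evex_byte2 (bool w, unsigned reg_spec, bool u, unsigned pfx)
{
  return (w << 7) | (reg_spec << 3) | (u << 2) | pfx;
}

int main (void)
{
  printf ("0x%02x\n", evex_byte2 (true, 0xf, true, 1)); /* 0xfd */
  return 0;
}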
@@ -4288,57 +4476,6 @@ build_evex_prefix (void)
/* Encode the vector length. */
unsigned int vec_length;
- if (i.tm.opcode_modifier.evex == EVEXDYN)
- {
- unsigned int op;
-
- /* Determine vector length from the last multi-length vector
- operand. */
- for (op = i.operands; op--;)
- if (i.tm.operand_types[op].bitfield.xmmword
- + i.tm.operand_types[op].bitfield.ymmword
- + i.tm.operand_types[op].bitfield.zmmword > 1)
- {
- if (i.types[op].bitfield.zmmword)
- {
- i.tm.opcode_modifier.evex = EVEX512;
- break;
- }
- else if (i.types[op].bitfield.ymmword)
- {
- i.tm.opcode_modifier.evex = EVEX256;
- break;
- }
- else if (i.types[op].bitfield.xmmword)
- {
- i.tm.opcode_modifier.evex = EVEX128;
- break;
- }
- else if ((i.broadcast.type || i.broadcast.bytes)
- && op == i.broadcast.operand)
- {
- switch (get_broadcast_bytes (&i.tm, true))
- {
- case 64:
- i.tm.opcode_modifier.evex = EVEX512;
- break;
- case 32:
- i.tm.opcode_modifier.evex = EVEX256;
- break;
- case 16:
- i.tm.opcode_modifier.evex = EVEX128;
- break;
- default:
- abort ();
- }
- break;
- }
- }
-
- if (op >= MAX_OPERANDS)
- abort ();
- }
-
switch (i.tm.opcode_modifier.evex)
{
case EVEXLIG: /* LL' is ignored */
@@ -4391,7 +4528,8 @@ build_rex2_prefix (void)
i.vex.bytes[0] = 0xd5;
/* For the W R X B bits, the variables of rex prefix will be reused. */
i.vex.bytes[1] = ((i.tm.opcode_space << 7)
- | (i.rex2 << 4) | i.rex);
+ | (i.rex2 << 4)
+ | ((i.rex | i.prefix[REX_PREFIX]) & 0xf));
}
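A standalone sketch of the payload byte after 0xd5 (bit meanings per the code above; the new twist in this patch is folding a bare REX prefix byte's low bits in as well):

#include <stdio.h>

/* REX2: 0xd5, then M0 | R4 X4 B4 | W R X B.  */
static unsigned char
rex2_payload (unsigned space, unsigned rex2, unsigned rex_bits)
{
  return (space << 7) | (rex2 << 4) | (rex_bits & 0xf);
}

int main (void)
{
  /* 0F map, no extended bits, REX.W set -> 0x88.  */
  printf ("0x%02x\n", rex2_payload (1, 0, 0x8));
  return 0;
}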
/* Build the EVEX prefix (4-byte) for evex insn
@@ -4400,9 +4538,37 @@ build_rex2_prefix (void)
| W | v`v`v`v | `x' | pp |
| z| L'L | b | `v | aaa |
*/
-static void
+static bool
build_apx_evex_prefix (void)
{
+ /* To mimic behavior for legacy insns, transform use of DATA16 and REX64 into
+ their embedded-prefix representations. */
+ if (i.tm.opcode_space == SPACE_EVEXMAP4)
+ {
+ if (i.prefix[DATA_PREFIX])
+ {
+ if (i.tm.opcode_modifier.opcodeprefix)
+ {
+ as_bad (i.tm.opcode_modifier.opcodeprefix == PREFIX_0X66
+ ? _("same type of prefix used twice")
+ : _("conflicting use of `data16' prefix"));
+ return false;
+ }
+ i.tm.opcode_modifier.opcodeprefix = PREFIX_0X66;
+ i.prefix[DATA_PREFIX] = 0;
+ }
+ if (i.prefix[REX_PREFIX] & REX_W)
+ {
+ if (i.suffix == QWORD_MNEM_SUFFIX)
+ {
+ as_bad (_("same type of prefix used twice"));
+ return false;
+ }
+ i.tm.opcode_modifier.vexw = VEXW1;
+ i.prefix[REX_PREFIX] = 0;
+ }
+ }
+
build_evex_prefix ();
if (i.rex2 & REX_R)
i.vex.bytes[1] &= ~0x10;
@@ -4436,8 +4602,10 @@ build_apx_evex_prefix (void)
}
/* Encode the NF bit. */
- if (i.has_nf)
+ if (pp.has_nf || i.tm.opcode_modifier.operandconstraint == EVEX_NF)
i.vex.bytes[3] |= 0x04;
+
+ return true;
}
static void establish_rex (void)
@@ -4450,9 +4618,9 @@ static void establish_rex (void)
i.rex |= i.prefix[REX_PREFIX] & REX_OPCODE;
/* For 8 bit RegRex64 registers without a prefix, we need an empty rex prefix. */
- if (((i.types[first].bitfield.class == Reg && i.types[first].bitfield.byte
+ if (((i.types[first].bitfield.class == Reg
&& (i.op[first].regs->reg_flags & RegRex64) != 0)
- || (i.types[last].bitfield.class == Reg && i.types[last].bitfield.byte
+ || (i.types[last].bitfield.class == Reg
&& (i.op[last].regs->reg_flags & RegRex64) != 0))
&& !is_apx_rex2_encoding () && !is_any_vex_encoding (&i.tm))
i.rex |= REX_OPCODE;
@@ -4466,9 +4634,8 @@ static void establish_rex (void)
{
/* Look for 8 bit operand that uses old registers. */
if (i.types[x].bitfield.class == Reg && i.types[x].bitfield.byte
- && (i.op[x].regs->reg_flags & RegRex64) == 0)
+ && !(i.op[x].regs->reg_flags & (RegRex | RegRex2 | RegRex64)))
{
- gas_assert (!(i.op[x].regs->reg_flags & RegRex));
/* In case it is "hi" register, give up. */
if (i.op[x].regs->reg_num > 3)
as_bad (_("can't encode register '%s%s' in an "
@@ -4484,7 +4651,7 @@ static void establish_rex (void)
}
}
- if (i.rex == 0 && i.rex2 == 0 && (i.rex_encoding || i.rex2_encoding))
+ if (i.rex == 0 && i.rex2 == 0 && (pp.rex_encoding || pp.rex2_encoding))
{
/* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
that uses legacy register. If it is "hi" register, don't add
@@ -4494,24 +4661,42 @@ static void establish_rex (void)
for (x = first; x <= last; x++)
if (i.types[x].bitfield.class == Reg
&& i.types[x].bitfield.byte
- && (i.op[x].regs->reg_flags & RegRex64) == 0
+ && !(i.op[x].regs->reg_flags & (RegRex | RegRex2 | RegRex64))
&& i.op[x].regs->reg_num > 3)
{
- gas_assert (!(i.op[x].regs->reg_flags & RegRex));
- i.rex_encoding = false;
- i.rex2_encoding = false;
+ pp.rex_encoding = false;
+ pp.rex2_encoding = false;
break;
}
- if (i.rex_encoding)
+ if (pp.rex_encoding)
i.rex = REX_OPCODE;
}
if (is_apx_rex2_encoding ())
{
+ /* Most prefixes are not permitted with JMPABS. */
+ if (i.tm.mnem_off == MN_jmpabs)
+ {
+ if (i.prefix[DATA_PREFIX] || (i.prefix[REX_PREFIX] & REX_W))
+ {
+ as_bad (_("size override not allowed with `%s'"),
+ insn_name (&i.tm));
+ i.prefix[DATA_PREFIX] = 0;
+ i.prefix[REX_PREFIX] &= ~REX_W;
+ }
+ if (i.prefix[ADDR_PREFIX])
+ {
+ as_bad (_("address override not allowed with `%s'"),
+ insn_name (&i.tm));
+ i.prefix[ADDR_PREFIX] = 0;
+ }
+ }
+
build_rex2_prefix ();
/* The individual REX.RXBW bits got consumed. */
i.rex &= REX_OPCODE;
+ i.prefix[REX_PREFIX] = 0;
}
else if (i.rex != 0)
add_prefix (REX_OPCODE | i.rex);
@@ -4802,7 +4987,7 @@ optimize_encoding (void)
/* Squash the suffix. */
i.suffix = 0;
/* Convert to byte registers. 8-bit registers are special,
- RegRex64 and non-RegRex64 each have 8 registers. */
+ RegRex64 and non-RegRex* each have 8 registers. */
if (i.types[1].bitfield.word)
/* 32 (or 40) 8-bit registers. */
j = 32;
@@ -4896,7 +5081,7 @@ optimize_encoding (void)
}
else if (i.reg_operands == 3
&& i.op[0].regs == i.op[1].regs
- && i.encoding != encoding_evex
+ && pp.encoding != encoding_evex
&& (i.tm.mnem_off == MN_xor
|| i.tm.mnem_off == MN_sub))
{
@@ -4965,17 +5150,11 @@ optimize_encoding (void)
i.tm.base_opcode = 0x00;
i.tm.extension_opcode = None;
if (i.operands >= 2)
- {
- i.tm.operand_types[0] = i.tm.operand_types[1];
- i.op[0].regs = i.op[1].regs;
- i.types[0] = i.types[1];
- }
+ copy_operand (0, 1);
else
{
/* Legacy form with omitted shift count operand. */
- i.tm.operand_types[1] = i.tm.operand_types[0];
- i.op[1].regs = i.op[0].regs;
- i.types[1] = i.types[0];
+ copy_operand (1, 0);
i.operands = 2;
}
i.reg_operands++;
@@ -5017,13 +5196,48 @@ optimize_encoding (void)
break;
}
}
+ else if (optimize > 1
+ && (i.tm.base_opcode | 0xf) == 0x4f
+ && i.tm.opcode_space == SPACE_EVEXMAP4
+ && i.reg_operands == 3
+ && i.tm.opcode_modifier.operandconstraint == EVEX_NF
+ && !i.types[0].bitfield.word)
+ {
+ /* Optimize: -O2:
+ cfcmov<cc> %rM, %rN, %rN -> cmov<cc> %rM, %rN
+ cfcmov<cc> %rM, %rN, %rM -> cmov<!cc> %rN, %rM
+ cfcmov<cc> %rN, %rN, %rN -> nop %rN
+ */
+ if (i.op[0].regs == i.op[2].regs)
+ {
+ i.tm.base_opcode ^= 1;
+ i.op[0].regs = i.op[1].regs;
+ i.op[1].regs = i.op[2].regs;
+ }
+ else if (i.op[1].regs != i.op[2].regs)
+ return;
+
+ i.tm.opcode_space = SPACE_0F;
+ i.tm.opcode_modifier.evex = 0;
+ i.tm.opcode_modifier.vexvvvv = 0;
+ i.tm.opcode_modifier.operandconstraint = 0;
+ i.reg_operands = 2;
+
+ /* While at it, convert to NOP if all three regs match. */
+ if (i.op[0].regs == i.op[1].regs)
+ {
+ i.tm.base_opcode = 0x1f;
+ i.tm.extension_opcode = 0;
+ i.reg_operands = 1;
+ }
+ }
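A worked check of the condition flip above: the cc sits in the low nibble of the 0x4x opcode, and flipping bit 0 negates it.

#include <stdio.h>

int main (void)
{
  unsigned op = 0x44;               /* cfcmove's cc nibble (e)  */
  printf ("0x%02x\n", op ^ 1);      /* 0x45: inverted cc (ne)   */
  return 0;
}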
else if (i.reg_operands == 3
&& i.op[0].regs == i.op[1].regs
&& !i.types[2].bitfield.xmmword
&& (i.tm.opcode_modifier.vex
|| ((!i.mask.reg || i.mask.zeroing)
&& i.tm.opcode_modifier.evex
- && (i.encoding != encoding_evex
+ && (pp.encoding != encoding_evex
|| cpu_arch_isa_flags.bitfield.cpuavx512vl
|| is_cpu (&i.tm, CpuAVX512VL)
|| (i.tm.operand_types[2].bitfield.zmmword
@@ -5073,12 +5287,12 @@ optimize_encoding (void)
*/
if (i.tm.opcode_modifier.evex)
{
- if (i.encoding != encoding_evex)
+ if (pp.encoding != encoding_evex)
{
i.tm.opcode_modifier.vex = VEX128;
i.tm.opcode_modifier.vexw = VEXW0;
i.tm.opcode_modifier.evex = 0;
- i.encoding = encoding_vex;
+ pp.encoding = encoding_vex;
i.mask.reg = NULL;
}
else if (optimize > 1)
@@ -5101,8 +5315,8 @@ optimize_encoding (void)
i.types[j].bitfield.ymmword = 0;
}
}
- else if (i.encoding != encoding_evex
- && i.encoding != encoding_egpr
+ else if (pp.encoding != encoding_evex
+ && pp.encoding != encoding_egpr
&& !i.types[0].bitfield.zmmword
&& !i.types[1].bitfield.zmmword
&& !i.mask.reg
@@ -5226,10 +5440,7 @@ optimize_encoding (void)
i.reg_operands = 3;
i.tm.operands = 3;
- i.op[2].regs = i.op[0].regs;
- i.types[2] = i.types[0];
- i.flags[2] = i.flags[0];
- i.tm.operand_types[2] = i.tm.operand_types[0];
+ copy_operand (2, 0);
i.tm.opcode_modifier.sse2avx = 0;
}
@@ -5273,9 +5484,7 @@ optimize_encoding (void)
i.tm.extension_opcode = None;
if (i.tm.opcode_modifier.vexvvvv)
i.tm.opcode_modifier.vexvvvv = VexVVVV_SRC1;
- i.tm.operand_types[0] = i.tm.operand_types[1];
- i.op[0].regs = i.op[1].regs;
- i.types[0] = i.types[1];
+ copy_operand (0, 1);
i.reg_operands++;
i.imm_operands = 0;
}
@@ -5286,7 +5495,7 @@ optimize_encoding (void)
&& i.tm.opcode_modifier.vex
&& !(i.op[0].regs->reg_flags & RegRex)
&& i.op[0].regs->reg_type.bitfield.xmmword
- && i.encoding != encoding_vex3)
+ && pp.encoding != encoding_vex3)
{
/* Optimize: -Os:
vpbroadcastq %xmmN, %xmmM -> vpunpcklqdq %xmmN, %xmmN, %xmmM (N < 8)
@@ -5299,11 +5508,7 @@ optimize_encoding (void)
++i.reg_operands;
++i.tm.operands;
- i.op[2].regs = i.op[0].regs;
- i.types[2] = i.types[0];
- i.flags[2] = i.flags[0];
- i.tm.operand_types[2] = i.tm.operand_types[0];
-
+ copy_operand (2, 0);
swap_2_operands (1, 2);
}
else if (i.tm.base_opcode == 0x16
@@ -5331,19 +5536,176 @@ optimize_encoding (void)
= i.tm.opcode_modifier.sse2avx ? VEXW0 : VEXWIG;
}
- i.op[0].regs = i.op[1].regs;
- i.types[0] = i.types[1];
- i.flags[0] = i.flags[1];
- i.tm.operand_types[0] = i.tm.operand_types[1];
+ copy_operand (0, 1);
+ copy_operand (1, 2);
- i.op[1].regs = i.op[2].regs;
- i.types[1] = i.types[2];
- i.flags[1] = i.flags[2];
- i.tm.operand_types[1] = i.tm.operand_types[2];
+ i.operands = 2;
+ i.imm_operands = 0;
+ }
+ else if (i.tm.base_opcode == 0x17
+ && i.tm.opcode_space == SPACE_0F3A
+ && i.op[0].imms->X_op == O_constant
+ && i.op[0].imms->X_add_number == 0)
+ {
+ /* Optimize: -O:
+ extractps $0, %xmmN, %rM -> movd %xmmN, %rM
+ extractps $0, %xmmN, mem -> movss %xmmN, mem
+ vextractps $0, %xmmN, %rM -> vmovd %xmmN, %rM
+ vextractps $0, %xmmN, mem -> vmovss %xmmN, mem
+ */
+ i.tm.opcode_space = SPACE_0F;
+ i.tm.opcode_modifier.vexw = VEXW0;
+
+ if (!i.mem_operands)
+ i.tm.base_opcode = 0x7e;
+ else
+ {
+ i.tm.base_opcode = 0x11;
+ i.tm.opcode_modifier.opcodeprefix = PREFIX_0XF3;
+ }
+
+ copy_operand (0, 1);
+ copy_operand (1, 2);
i.operands = 2;
i.imm_operands = 0;
}
+ else if ((i.tm.base_opcode | 0x22) == 0x3b
+ && i.tm.opcode_space == SPACE_0F3A
+ && i.op[0].imms->X_op == O_constant
+ && i.op[0].imms->X_add_number == 0)
+ {
+ /* Optimize: -O:
+ vextractf128 $0, %ymmN, %xmmM -> vmovaps %xmmN, %xmmM
+ vextractf128 $0, %ymmN, mem -> vmovups %xmmN, mem
+ vextractf32x4 $0, %[yz]mmN, %xmmM -> vmovaps %xmmN, %xmmM
+ vextractf32x4 $0, %[yz]mmN, mem -> vmovups %xmmN, mem
+ vextractf64x2 $0, %[yz]mmN, %xmmM -> vmovapd %xmmN, %xmmM
+ vextractf64x2 $0, %[yz]mmN, mem -> vmovupd %xmmN, mem
+ vextractf32x8 $0, %zmmN, %ymmM -> vmovaps %ymmN, %ymmM
+ vextractf32x8 $0, %zmmN, mem -> vmovups %ymmN, mem
+ vextractf64x4 $0, %zmmN, %ymmM -> vmovapd %ymmN, %ymmM
+ vextractf64x4 $0, %zmmN, mem -> vmovupd %ymmN, mem
+ vextracti128 $0, %ymmN, %xmmM -> vmovdqa %xmmN, %xmmM
+ vextracti128 $0, %ymmN, mem -> vmovdqu %xmmN, mem
+ vextracti32x4 $0, %[yz]mmN, %xmmM -> vmovdqa{,32} %xmmN, %xmmM
+ vextracti32x4 $0, %[yz]mmN, mem -> vmovdqu{,32} %xmmN, mem
+ vextracti64x2 $0, %[yz]mmN, %xmmM -> vmovdqa{,64} %xmmN, %xmmM
+ vextracti64x2 $0, %[yz]mmN, mem -> vmovdqu{,64} %xmmN, mem
+ vextracti32x8 $0, %zmmN, %ymmM -> vmovdqa{,32} %ymmN, %ymmM
+ vextracti32x8 $0, %zmmN, mem -> vmovdqu{,32} %ymmN, mem
+ vextracti64x4 $0, %zmmN, %ymmM -> vmovdqa{,64} %ymmN, %ymmM
+ vextracti64x4 $0, %zmmN, mem -> vmovdqu{,64} %ymmN, mem
+ */
+ i.tm.opcode_space = SPACE_0F;
+
+ if (!i.mask.reg
+ && (pp.encoding <= encoding_vex3
+ || (pp.encoding == encoding_evex512
+ && (!i.base_reg || !(i.base_reg->reg_flags & RegRex2))
+ && (!i.index_reg || !(i.index_reg->reg_flags & RegRex2)))))
+ {
+ i.tm.opcode_modifier.vex = i.tm.base_opcode & 2 ? VEX256 : VEX128;
+ i.tm.opcode_modifier.evex = 0;
+ }
+ else
+ i.tm.opcode_modifier.evex = i.tm.base_opcode & 2 ? EVEX256 : EVEX128;
+
+ if (i.tm.base_opcode & 0x20)
+ {
+ i.tm.base_opcode = 0x7f;
+ if (i.reg_operands != 2)
+ i.tm.opcode_modifier.opcodeprefix = PREFIX_0XF3;
+ }
+ else
+ {
+ if (i.reg_operands == 2)
+ i.tm.base_opcode = 0x29;
+ else
+ i.tm.base_opcode = 0x11;
+ if (i.tm.opcode_modifier.vexw != VEXW1)
+ i.tm.opcode_modifier.opcodeprefix = PREFIX_NONE;
+ }
+
+ if (i.tm.opcode_modifier.vex)
+ i.tm.opcode_modifier.vexw = VEXWIG;
+
+ copy_operand (0, 1);
+ copy_operand (1, 2);
+
+ i.operands = 2;
+ i.imm_operands = 0;
+ }
+ else if (i.tm.base_opcode == 0x21
+ && i.tm.opcode_space == SPACE_0F3A
+ && i.op[0].imms->X_op == O_constant
+ && (i.operands == i.reg_operands + 1
+ ? i.op[0].imms->X_add_number == 0
+ || (i.op[0].imms->X_add_number & 0xf) == 0xf
+ : (i.op[0].imms->X_add_number & 0x3f) == 0x0e
+ && (i.reg_operands == 1 || i.op[2].regs == i.op[3].regs)))
+ {
+ /* Optimize: -O:
+ insertps $0b....1111, %xmmN, %xmmM -> xorps %xmmM, %xmmM
+ insertps $0b00000000, %xmmN, %xmmM -> movss %xmmN, %xmmM
+ insertps $0b..001110, mem, %xmmN -> movss mem, %xmmN
+ vinsertps $0b....1111, %xmmN, %xmmM, %xmmK -> vxorps %xmm?, %xmm?, %xmmK
+ vinsertps $0b00000000, %xmmN, %xmmM, %xmmK -> vmovss %xmmN, %xmmM, %xmmK
+ vinsertps $0b..001110, mem, %xmmN, %xmmN -> vmovss mem, %xmmN
+ */
+ i.tm.opcode_space = SPACE_0F;
+ if ((i.op[0].imms->X_add_number & 0xf) == 0xf)
+ {
+ i.tm.base_opcode = 0x57;
+ i.tm.opcode_modifier.opcodeprefix = PREFIX_NONE;
+
+ --i.operands;
+
+ copy_operand (i.operands - 1, i.operands);
+ copy_operand (1, i.operands - 1);
+ copy_operand (0, 1);
+
+ /* Switch from EVEX to VEX encoding if possible. Sadly we can't
+ (always) tell use of the {evex} pseudo-prefix (which otherwise
+ we'd like to respect) from use of %xmm16-%xmm31. */
+ if (pp.encoding == encoding_evex)
+ pp.encoding = encoding_default;
+ if (i.tm.opcode_modifier.evex
+ && pp.encoding <= encoding_vex3
+ && !(i.op[0].regs->reg_flags & RegVRex))
+ {
+ i.tm.opcode_modifier.evex = 0;
+ i.tm.opcode_modifier.vex = VEX128;
+ }
+
+ /* Switch from VEX3 to VEX2 encoding if possible. */
+ if (i.tm.opcode_modifier.vex
+ && pp.encoding <= encoding_vex
+ && (i.op[0].regs->reg_flags & RegRex))
+ {
+ i.op[0].regs -= 8;
+ i.op[1].regs = i.op[0].regs;
+ }
+ }
+ else
+ {
+ i.tm.base_opcode = 0x10;
+ i.tm.opcode_modifier.opcodeprefix = PREFIX_0XF3;
+
+ if (i.op[0].imms->X_add_number == 0)
+ --i.operands;
+ else
+ {
+ i.operands = 2;
+ i.tm.opcode_modifier.vexvvvv = 0;
+ }
+ copy_operand (0, 1);
+ copy_operand (1, 2);
+ copy_operand (2, 3);
+ }
+
+ i.imm_operands = 0;
+ }
}
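Worked check of the insertps immediate tests above: the low four bits of the immediate are the zmask, so $0b....1111 zeroes the whole destination (xorps-able), while $0b..001110 keeps only element 0 (movss-able).

#include <stdio.h>

int main (void)
{
  printf ("%d\n", (0x4f & 0xf) == 0xf);     /* 1: xorps case */
  printf ("%d\n", (0x0e & 0x3f) == 0x0e);   /* 1: movss case */
  return 0;
}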
/* Check whether the promoted (to address size) register is usable as index
@@ -5355,7 +5717,7 @@ static bool is_index (const reg_entry *r)
if (r->reg_type.bitfield.byte)
{
- if (!(r->reg_flags & RegRex64))
+ if (!(r->reg_flags & (RegRex | RegRex2 | RegRex64)))
{
if (r->reg_num >= 4)
return false;
@@ -5432,16 +5794,8 @@ optimize_nf_encoding (void)
= (i.op[0].imms->X_add_number == 1) != (i.tm.extension_opcode == 0);
i.tm.opcode_modifier.w = 1;
- i.types[0] = i.types[1];
- i.types[1] = i.types[2];
- i.tm.operand_types[0] = i.tm.operand_types[1];
- i.tm.operand_types[1] = i.tm.operand_types[2];
- i.op[0] = i.op[1];
- i.op[1] = i.op[2];
- i.flags[0] = i.flags[1];
- i.flags[1] = i.flags[2];
- i.reloc[0] = i.reloc[1];
- i.reloc[1] = NO_RELOC;
+ copy_operand (0, 1);
+ copy_operand (1, 2);
i.imm_operands = 0;
--i.operands;
@@ -5530,14 +5884,12 @@ optimize_nf_encoding (void)
i.tm.opcode_modifier.vexvvvv = VexVVVV_DST;
i.operands = 3;
i.reg_operands = 2;
- i.op[2].regs = i.op[1].regs;
- i.tm.operand_types[2] = i.tm.operand_types[1];
- i.types[2] = i.types[1];
+ copy_operand (2, 1);
}
}
if (optimize_for_space
- && i.encoding != encoding_evex
+ && pp.encoding != encoding_evex
&& (i.tm.base_opcode == 0x00
|| (i.tm.base_opcode == 0xd0 && i.tm.extension_opcode == 4))
&& !i.mem_operands
@@ -5546,7 +5898,7 @@ optimize_nf_encoding (void)
no size reduction would be possible. Plus 3-operand forms zero-
extend the result, which can't be expressed with LEA. */
&& (!i.types[1].bitfield.word
- || (i.operands == 2 && i.encoding != encoding_egpr))
+ || (i.operands == 2 && pp.encoding != encoding_egpr))
&& is_plausible_suffix (1)
/* %rsp can't be the index. */
&& (is_index (i.op[1].regs)
@@ -5556,7 +5908,7 @@ optimize_nf_encoding (void)
from that set and REX2 would be required to encode the insn, the
resulting encoding would be no smaller than the EVEX one. */
&& (i.op[1].regs->reg_num != 5
- || i.encoding != encoding_egpr
+ || pp.encoding != encoding_egpr
|| i.imm_operands > 0
|| i.op[0].regs->reg_num != 5))
{
@@ -5611,10 +5963,10 @@ optimize_nf_encoding (void)
i.operands = 2;
i.mem_operands = i.reg_operands = 1;
i.imm_operands = 0;
- i.has_nf = false;
+ pp.has_nf = false;
}
else if (optimize_for_space
- && i.encoding != encoding_evex
+ && pp.encoding != encoding_evex
&& (i.tm.base_opcode == 0x80 || i.tm.base_opcode == 0x83)
&& (i.tm.extension_opcode == 0
|| (i.tm.extension_opcode == 5
@@ -5625,14 +5977,14 @@ optimize_nf_encoding (void)
|| (i.types[1].bitfield.dword
&& !(i.op[1].regs->reg_flags & RegRex)
&& !(i.op[i.operands - 1].regs->reg_flags & RegRex)
- && i.encoding != encoding_egpr))))
+ && pp.encoding != encoding_egpr))))
&& !i.mem_operands
&& !i.types[1].bitfield.byte
/* 16-bit operand size has extra restrictions: If REX2 was needed,
no size reduction would be possible. Plus 3-operand forms zero-
extend the result, which can't be expressed with LEA. */
&& (!i.types[1].bitfield.word
- || (i.operands == 2 && i.encoding != encoding_egpr))
+ || (i.operands == 2 && pp.encoding != encoding_egpr))
&& is_plausible_suffix (1))
{
/* Optimize: -Os:
@@ -5676,11 +6028,11 @@ optimize_nf_encoding (void)
i.operands = 2;
i.disp_operands = i.mem_operands = i.reg_operands = 1;
i.imm_operands = 0;
- i.has_nf = false;
+ pp.has_nf = false;
}
else if (i.tm.base_opcode == 0x6b
&& !i.mem_operands
- && i.encoding != encoding_evex
+ && pp.encoding != encoding_evex
&& i.tm.mnem_off != MN_imulzu
&& is_plausible_suffix (1)
/* %rsp can't be the index. */
@@ -5690,7 +6042,7 @@ optimize_nf_encoding (void)
&& (!optimize_for_space
|| !i.types[1].bitfield.word
|| i.op[1].regs->reg_num != 5
- || i.encoding != encoding_egpr)
+ || pp.encoding != encoding_egpr)
&& i.op[0].imms->X_op == O_constant
&& (i.op[0].imms->X_add_number == 3
|| i.op[0].imms->X_add_number == 5
@@ -5724,14 +6076,117 @@ optimize_nf_encoding (void)
i.op[0].disps = NULL;
i.flags[0] = Operand_Mem;
- i.tm.operand_types[1] = i.tm.operand_types[i.operands - 1];
- i.op[1].regs = i.op[i.operands - 1].regs;
- i.types[1] = i.types[i.operands - 1];
+ copy_operand (1, i.operands - 1);
i.operands = 2;
i.mem_operands = i.reg_operands = 1;
i.imm_operands = 0;
- i.has_nf = false;
+ pp.has_nf = false;
+ }
+ else if (cpu_arch_isa_flags.bitfield.cpubmi2
+ && pp.encoding == encoding_default
+ && (i.operands > 2 || !i.mem_operands)
+ && (i.types[i.operands - 1].bitfield.dword
+ || i.types[i.operands - 1].bitfield.qword))
+ {
+ if (i.tm.base_opcode == 0xd2)
+ {
+ /* Optimize: -O:
+ <OP> one of sal, sar, shl, shr:
+ {nf} <OP> %cl, %rN -> <OP>x %{e,r}cx, %rN, %rN (N < 16)
+ {nf} <OP> %cl, ..., %rN -> <OP>x %{e,r}cx, ..., %rN (no eGPR used)
+ */
+ gas_assert (i.tm.extension_opcode & 4);
+ i.tm.operand_types[0] = i.tm.operand_types[i.operands - 1];
+ /* NB: i.op[0].regs specifying %cl is good enough. */
+ i.types[0] = i.types[i.operands - 1];
+ if (i.operands == 2)
+ {
+ i.tm.operand_types[0].bitfield.baseindex = 0;
+ i.tm.operand_types[2] = i.tm.operand_types[0];
+ i.op[2].regs = i.op[1].regs;
+ i.types[2] = i.types[1];
+ i.reg_operands = i.operands = 3;
+ }
+ pp.has_nf = false;
+ i.tm.opcode_modifier.w = 0;
+ i.tm.opcode_modifier.evex = 0;
+ i.tm.opcode_modifier.vex = VEX128;
+ i.tm.opcode_modifier.vexvvvv = VexVVVV_SRC2;
+ i.tm.opcode_space = SPACE_0F38;
+ i.tm.base_opcode = 0xf7;
+ i.tm.opcode_modifier.opcodeprefix
+ = !(i.tm.extension_opcode & 1)
+ ? PREFIX_0X66 /* shlx */
+ : i.tm.extension_opcode & 2
+ ? PREFIX_0XF3 /* sarx */
+ : PREFIX_0XF2 /* shrx */;
+ i.tm.extension_opcode = None;
+ }
+ else if (i.tm.base_opcode == 0xc0
+ && i.tm.extension_opcode <= 1
+ && i.op[0].imms->X_op == O_constant)
+ {
+ /* Optimize: -O:
+ {nf} rol $I, %rN -> rorx $osz-I, %rN, %rN (I != osz-1, N < 16)
+ {nf} rol $I, ..., %rN -> rorx $osz-I, ..., %rN (I != osz-1, no eGPR used)
+ {nf} ror $I, %rN -> rorx $I, %rN, %rN (I != 1, N < 16)
+ {nf} ror $I, ..., %rN -> rorx $I,..., %rN (I != 1, no eGPR used)
+ NB: rol -> ror transformation for I == osz-1 was already handled above.
+ NB2: ror with an immediate of 1 uses a different base opcode.
+ */
+ if (i.operands == 2)
+ {
+ copy_operand (2, 1);
+ i.tm.operand_types[2].bitfield.baseindex = 0;
+ i.reg_operands = 2;
+ i.operands = 3;
+ }
+ pp.has_nf = false;
+ i.tm.opcode_modifier.w = 0;
+ i.tm.opcode_modifier.evex = 0;
+ i.tm.opcode_modifier.vex = VEX128;
+ i.tm.opcode_modifier.vexvvvv = 0;
+ i.tm.opcode_space = SPACE_0F3A;
+ i.tm.base_opcode = 0xf0;
+ i.tm.opcode_modifier.opcodeprefix = PREFIX_0XF2;
+ if (!i.tm.extension_opcode)
+ i.op[0].imms->X_add_number =
+ (i.types[i.operands - 1].bitfield.byte
+ ? 8 : i.types[i.operands - 1].bitfield.word
+ ? 16 : 64 >> i.types[i.operands - 1].bitfield.dword)
+ - i.op[0].imms->X_add_number;
+ i.tm.extension_opcode = None;
+ }
+ else if (i.tm.base_opcode == 0xf6
+ && i.tm.extension_opcode == 4
+ && !i.mem_operands
+ && i.op[0].regs->reg_num == 2
+ && !(i.op[0].regs->reg_flags & RegRex) )
+ {
+ /* Optimize: -O:
+ {nf} mul %edx -> mulx %eax, %eax, %edx
+ {nf} mul %rdx -> mulx %rax, %rax, %rdx
+ */
+ i.tm.operand_types[1] = i.tm.operand_types[0];
+ i.tm.operand_types[1].bitfield.baseindex = 0;
+ i.tm.operand_types[2] = i.tm.operand_types[1];
+ i.op[2].regs = i.op[0].regs;
+ /* NB: %eax is good enough also for 64-bit operand size. */
+ i.op[1].regs = i.op[0].regs = reg_eax;
+ i.types[2] = i.types[1] = i.types[0];
+ i.reg_operands = i.operands = 3;
+
+ pp.has_nf = false;
+ i.tm.opcode_modifier.w = 0;
+ i.tm.opcode_modifier.evex = 0;
+ i.tm.opcode_modifier.vex = VEX128;
+ i.tm.opcode_modifier.vexvvvv = VexVVVV_SRC1;
+ i.tm.opcode_space = SPACE_0F38;
+ i.tm.base_opcode = 0xf6;
+ i.tm.opcode_modifier.opcodeprefix = PREFIX_0XF2;
+ i.tm.extension_opcode = None;
+ }
}
}
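A worked example of the rorx immediate rewrite above: for {nf} rol $I the rotate amount becomes osz - I, where osz is picked by the (byte ? 8 : word ? 16 : 64 >> dword) expression.

#include <stdio.h>

int main (void)
{
  int byte = 0, word = 0, dword = 1;            /* 32-bit operand */
  int osz = byte ? 8 : word ? 16 : 64 >> dword; /* -> 32 */
  int I = 5;
  printf ("rol $%d -> rorx $%d\n", I, osz - I); /* rorx $27 */
  return 0;
}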
@@ -6085,1120 +6540,372 @@ static INLINE bool may_need_pass2 (const insn_template *t)
&& (t->base_opcode | 8) == 0x2c);
}
-#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
-
-/* DWARF register number for EFLAGS. Used for pushf/popf insns. */
-#define GINSN_DW2_REGNUM_EFLAGS 49
-/* DWARF register number for RSI. Used as dummy value when RegIP/RegIZ. */
-#define GINSN_DW2_REGNUM_RSI_DUMMY 4
-
-/* Identify the callee-saved registers in System V AMD64 ABI. */
-
-bool
-x86_scfi_callee_saved_p (unsigned int dw2reg_num)
-{
- if (dw2reg_num == 3 /* rbx. */
- || dw2reg_num == REG_FP /* rbp. */
- || dw2reg_num == REG_SP /* rsp. */
- || (dw2reg_num >= 12 && dw2reg_num <= 15) /* r12 - r15. */)
- return true;
-
- return false;
-}
-
-/* Check whether an instruction prefix which affects operation size
- accompanies. For insns in the legacy space, setting REX.W takes precedence
- over the operand-size prefix (66H) when both are used.
-
- The current users of this API are in the handlers for PUSH, POP or other
- instructions which affect the stack pointer implicitly: the operation size
- (16, 32, or 64 bits) determines the amount by which the stack pointer is
- incremented / decremented (2, 4 or 8). */
-
-static bool
-ginsn_opsize_prefix_p (void)
-{
- return (!(i.prefix[REX_PREFIX] & REX_W) && i.prefix[DATA_PREFIX]);
-}
-
-/* Get the DWARF register number for the given register entry.
- For specific byte/word/dword register accesses like al, cl, ah, ch, r8d,
- r20w etc., we need to identify the DWARF register number for the
- corresponding 8-byte GPR.
-
- This function is a hack - it relies on relative ordering of reg entries in
- the i386_regtab. FIXME - it will be good to allow a more direct way to get
- this information. */
-
-static unsigned int
-ginsn_dw2_regnum (const reg_entry *ireg)
+#ifdef OBJ_ELF
+static enum x86_tls_error_type
+x86_check_tls_relocation (enum bfd_reloc_code_real r_type)
{
- const reg_entry *temp = ireg;
- unsigned int dwarf_reg = Dw2Inval, idx = 0;
-
- /* ginsn creation is available for AMD64 abi only ATM. Other flag_code
- are not expected. */
- gas_assert (ireg && flag_code == CODE_64BIT);
-
- /* Watch out for RegIP, RegIZ. These are expected to appear only with
- base/index addressing modes. Although creating inaccurate data
- dependencies, using a dummy value (lets say volatile register rsi) will
- not hurt SCFI. TBD_GINSN_GEN_NOT_SCFI. */
- if (ireg->reg_num == RegIP || ireg->reg_num == RegIZ)
- return GINSN_DW2_REGNUM_RSI_DUMMY;
-
- dwarf_reg = ireg->dw2_regnum[object_64bit];
-
- if (dwarf_reg == Dw2Inval)
- {
- if (ireg <= &i386_regtab[3])
- /* For al, cl, dl, bl, bump over to axl, cxl, dxl, bxl respectively by
- adding 8. */
- temp = ireg + 8;
- else if (ireg <= &i386_regtab[7])
- /* For ah, ch, dh, bh, bump over to axl, cxl, dxl, bxl respectively by
- adding 4. */
- temp = ireg + 4;
- else
- {
- /* The code relies on the relative ordering of the reg entries in
- i386_regtab. There are 32 register entries between axl-r31b,
- ax-r31w etc. The assertions here ensures the code does not
- recurse indefinitely. */
- gas_assert ((temp - &i386_regtab[0]) >= 0);
- idx = temp - &i386_regtab[0];
- gas_assert (idx + 32 < i386_regtab_size - 1);
-
- temp = temp + 32;
- }
-
- dwarf_reg = ginsn_dw2_regnum (temp);
- }
-
- /* Sanity check - failure may indicate state corruption, bad ginsn or
- perhaps the i386-reg table and the current function got out of sync. */
- gas_assert (dwarf_reg < Dw2Inval);
-
- return dwarf_reg;
-}
-
-static ginsnS *
-x86_ginsn_addsub_reg_mem (const symbolS *insn_end_sym)
-{
- unsigned int dw2_regnum;
- unsigned int src1_dw2_regnum;
- ginsnS *ginsn = NULL;
- ginsnS * (*ginsn_func) (const symbolS *, bool,
- enum ginsn_src_type, unsigned int, offsetT,
- enum ginsn_src_type, unsigned int, offsetT,
- enum ginsn_dst_type, unsigned int, offsetT);
- uint16_t opcode = i.tm.base_opcode;
-
- gas_assert (i.tm.opcode_space == SPACE_BASE
- && (opcode == 0x1 || opcode == 0x29));
- ginsn_func = (opcode == 0x1) ? ginsn_new_add : ginsn_new_sub;
-
- /* op %reg, symbol or even other cases where destination involves indirect
- access are unnecessary for SCFI correctness. TBD_GINSN_GEN_NOT_SCFI. */
- if (i.mem_operands)
- return ginsn;
-
- /* Skip detection of 8/16/32-bit op size; 'add/sub reg, reg/mem' ops always
- make the dest reg untraceable for SCFI. */
-
- /* op reg, reg/mem. */
- src1_dw2_regnum = ginsn_dw2_regnum (i.op[0].regs);
- /* Of interest only when second opnd is not memory. */
- if (i.reg_operands == 2)
- {
- dw2_regnum = ginsn_dw2_regnum (i.op[1].regs);
- ginsn = ginsn_func (insn_end_sym, true,
- GINSN_SRC_REG, src1_dw2_regnum, 0,
- GINSN_SRC_REG, dw2_regnum, 0,
- GINSN_DST_REG, dw2_regnum, 0);
- ginsn_set_where (ginsn);
- }
-
- return ginsn;
-}
-
-static ginsnS *
-x86_ginsn_addsub_mem_reg (const symbolS *insn_end_sym)
-{
- unsigned int dw2_regnum;
- unsigned int src1_dw2_regnum;
- const reg_entry *mem_reg;
- int32_t gdisp = 0;
- ginsnS *ginsn = NULL;
- ginsnS * (*ginsn_func) (const symbolS *, bool,
- enum ginsn_src_type, unsigned int, offsetT,
- enum ginsn_src_type, unsigned int, offsetT,
- enum ginsn_dst_type, unsigned int, offsetT);
- uint16_t opcode = i.tm.base_opcode;
-
- gas_assert (i.tm.opcode_space == SPACE_BASE
- && (opcode == 0x3 || opcode == 0x2b));
- ginsn_func = (opcode == 0x3) ? ginsn_new_add : ginsn_new_sub;
-
- /* op symbol, %reg. */
- if (i.mem_operands && !i.base_reg && !i.index_reg)
- return ginsn;
-
- /* Skip detection of 8/16/32-bit op size; 'add/sub reg/mem, reg' ops always
- make the dest reg untraceable for SCFI. */
-
- /* op reg/mem, %reg. */
- dw2_regnum = ginsn_dw2_regnum (i.op[1].regs);
-
- if (i.reg_operands == 2)
- {
- src1_dw2_regnum = ginsn_dw2_regnum (i.op[0].regs);
- ginsn = ginsn_func (insn_end_sym, true,
- GINSN_SRC_REG, src1_dw2_regnum, 0,
- GINSN_SRC_REG, dw2_regnum, 0,
- GINSN_DST_REG, dw2_regnum, 0);
- ginsn_set_where (ginsn);
- }
- else if (i.mem_operands)
- {
- mem_reg = (i.base_reg) ? i.base_reg : i.index_reg;
- src1_dw2_regnum = ginsn_dw2_regnum (mem_reg);
- if (i.disp_operands == 1)
- gdisp = i.op[0].disps->X_add_number;
- ginsn = ginsn_func (insn_end_sym, true,
- GINSN_SRC_INDIRECT, src1_dw2_regnum, gdisp,
- GINSN_SRC_REG, dw2_regnum, 0,
- GINSN_DST_REG, dw2_regnum, 0);
- ginsn_set_where (ginsn);
- }
-
- return ginsn;
-}
-
-static ginsnS *
-x86_ginsn_alu_imm (const symbolS *insn_end_sym)
-{
- offsetT src_imm;
- unsigned int dw2_regnum;
- ginsnS *ginsn = NULL;
- enum ginsn_src_type src_type = GINSN_SRC_REG;
- enum ginsn_dst_type dst_type = GINSN_DST_REG;
-
- ginsnS * (*ginsn_func) (const symbolS *, bool,
- enum ginsn_src_type, unsigned int, offsetT,
- enum ginsn_src_type, unsigned int, offsetT,
- enum ginsn_dst_type, unsigned int, offsetT);
-
- /* FIXME - create ginsn where dest is REG_SP / REG_FP only ? */
- /* Map for insn.tm.extension_opcode
- 000 ADD 100 AND
- 001 OR 101 SUB
- 010 ADC 110 XOR
- 011 SBB 111 CMP */
-
- /* add/sub/and imm, %reg only at this time for SCFI.
- Although all three ('and', 'or' , 'xor') make the destination reg
- untraceable, 'and' op is handled but not 'or' / 'xor' because we will look
- into supporting the DRAP pattern at some point. Other opcodes ('adc',
- 'sbb' and 'cmp') are not generated here either. The ginsn representation
- does not have support for the latter three opcodes; GINSN_TYPE_OTHER may
- be added for these after x86_ginsn_unhandled () invocation if the
- destination register is REG_SP or REG_FP. */
- if (i.tm.extension_opcode == 5)
- ginsn_func = ginsn_new_sub;
- else if (i.tm.extension_opcode == 4)
- ginsn_func = ginsn_new_and;
- else if (i.tm.extension_opcode == 0)
- ginsn_func = ginsn_new_add;
- else
- return ginsn;
-
- /* TBD_GINSN_REPRESENTATION_LIMIT: There is no representation for when a
- symbol is used as an operand, like so:
- addq $simd_cmp_op+8, %rdx
- Skip generating any ginsn for this. */
- if (i.imm_operands == 1
- && i.op[0].imms->X_op != O_constant)
- return ginsn;
-
- /* addq $1, symbol
- addq $1, -16(%rbp)
- These are not of interest for SCFI. Also, TBD_GINSN_GEN_NOT_SCFI. */
- if (i.mem_operands == 1)
- return ginsn;
-
- /* 8/16/32-bit op size makes the destination reg untraceable for SCFI.
- Deal with this via the x86_ginsn_unhandled () code path. */
- if (i.suffix != QWORD_MNEM_SUFFIX)
- return ginsn;
-
- gas_assert (i.imm_operands == 1);
- src_imm = i.op[0].imms->X_add_number;
- /* The second operand may be a register or indirect access. For SCFI, only
- the case when the second opnd is a register is interesting. Revisit this
- if generating ginsns for a different gen mode TBD_GINSN_GEN_NOT_SCFI. */
- if (i.reg_operands == 1)
- {
- dw2_regnum = ginsn_dw2_regnum (i.op[1].regs);
- /* For ginsn, keep the imm as second src operand. */
- ginsn = ginsn_func (insn_end_sym, true,
- src_type, dw2_regnum, 0,
- GINSN_SRC_IMM, 0, src_imm,
- dst_type, dw2_regnum, 0);
-
- ginsn_set_where (ginsn);
- }
-
- return ginsn;
-}
+ switch (r_type)
+ {
+ case BFD_RELOC_386_TLS_GOTDESC:
+ /* Check GDesc access model:
-/* Create ginsn(s) for MOV operations.
+ leal x@tlsdesc(%ebx), %reg32 --> Memory reg must be %ebx and
+ SIB is not supported.
+ */
+ if (i.tm.mnem_off != MN_lea)
+ return x86_tls_error_insn;
+ if (i.index_reg)
+ return x86_tls_error_sib;
+ if (!i.base_reg)
+ return x86_tls_error_no_base_reg;
+ if (i.base_reg->reg_type.bitfield.instance != RegB)
+ return x86_tls_error_ebx;
+ if (!i.op[1].regs->reg_type.bitfield.dword)
+ return x86_tls_error_dest_32bit_reg_size;
+ break;
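A toy re-statement of the GDesc constraints just checked (field names illustrative, not gas's): the base register must be %ebx, no index register may appear, and the destination must be 32 bits wide.

#include <stdbool.h>
#include <stdio.h>

struct toy_mem { bool has_index; int base; /* 3 == %ebx */ };

static bool gdesc_ok (struct toy_mem m, int dest_bits)
{
  return !m.has_index && m.base == 3 && dest_bits == 32;
}

int main (void)
{
  struct toy_mem m = { false, 3 };
  printf ("%d\n", gdesc_ok (m, 32));  /* 1: leal x@tlsdesc(%ebx), %eax */
  return 0;
}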
- The generated ginsns corresponding to mov with indirect access to memory
- (src or dest) suffer with loss of information: when both index and base
- registers are at play, only base register gets conveyed in ginsn. Note
- this TBD_GINSN_GEN_NOT_SCFI. */
+ case BFD_RELOC_386_TLS_GD:
+ /* Check GD access model:
-static ginsnS *
-x86_ginsn_move (const symbolS *insn_end_sym)
-{
- ginsnS *ginsn = NULL;
- unsigned int dst_reg;
- unsigned int src_reg;
- offsetT src_disp = 0;
- offsetT dst_disp = 0;
- const reg_entry *dst = NULL;
- const reg_entry *src = NULL;
- uint16_t opcode = i.tm.base_opcode;
- enum ginsn_src_type src_type = GINSN_SRC_REG;
- enum ginsn_dst_type dst_type = GINSN_DST_REG;
-
- /* mov %reg, symbol or mov symbol, %reg.
- Not of interest for SCFI. Also, TBD_GINSN_GEN_NOT_SCFI. */
- if (i.mem_operands == 1 && !i.base_reg && !i.index_reg)
- return ginsn;
-
- /* 8/16/32-bit op size makes the destination reg untraceable for SCFI.
- Handle mov reg, reg only. mov to or from a memory operand will make
- dest reg, when present, untraceable, irrespective of the op size. */
- if (i.reg_operands == 2 && i.suffix != QWORD_MNEM_SUFFIX)
- return ginsn;
-
- gas_assert (i.tm.opcode_space == SPACE_BASE);
- if (opcode == 0x8b || opcode == 0x8a)
- {
- /* mov disp(%reg), %reg. */
- if (i.mem_operands)
+ leal foo@tlsgd(,%ebx,1), %eax --> Only this fixed format is supported.
+ leal foo@tlsgd(%reg32), %eax --> Dest reg must be '%eax'
+ Memory reg can't be %eax.
+ */
+ if (i.tm.mnem_off != MN_lea)
+ return x86_tls_error_insn;
+ if (i.op[1].regs->reg_type.bitfield.instance != Accum)
+ return x86_tls_error_dest_eax;
+ if (!i.op[1].regs->reg_type.bitfield.dword)
+ return x86_tls_error_dest_32bit_reg_size;
+ if (i.index_reg)
{
- src = (i.base_reg) ? i.base_reg : i.index_reg;
- if (i.disp_operands == 1)
- src_disp = i.op[0].disps->X_add_number;
- src_type = GINSN_SRC_INDIRECT;
+ if (i.base_reg)
+ return x86_tls_error_base_reg;
+ if (i.index_reg->reg_type.bitfield.instance != RegB)
+ return x86_tls_error_index_ebx;
+ if (i.log2_scale_factor)
+ return x86_tls_error_scale_factor;
}
else
- src = i.op[0].regs;
-
- dst = i.op[1].regs;
- }
- else if (opcode == 0x89 || opcode == 0x88)
- {
- /* mov %reg, disp(%reg). */
- src = i.op[0].regs;
- if (i.mem_operands)
{
- dst = (i.base_reg) ? i.base_reg : i.index_reg;
- if (i.disp_operands == 1)
- dst_disp = i.op[1].disps->X_add_number;
- dst_type = GINSN_DST_INDIRECT;
+ if (!i.base_reg)
+ return x86_tls_error_no_base_reg;
+ if (i.base_reg->reg_type.bitfield.instance == Accum)
+ return x86_tls_error_eax;
}
- else
- dst = i.op[1].regs;
- }
-
- src_reg = ginsn_dw2_regnum (src);
- dst_reg = ginsn_dw2_regnum (dst);
+ break;
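+
+ Illustrative sequences for the IA32 GD check above (placeholder symbol
+ `foo'):
+
+	leal	foo@tlsgd(,%ebx,1), %eax	# accepted: the fixed SIB form
+	leal	foo@tlsgd(%ecx), %eax		# accepted: any 32-bit base but %eax
+	leal	foo@tlsgd(%ecx), %edx		# rejected: dest must be %eax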
- ginsn = ginsn_new_mov (insn_end_sym, true,
- src_type, src_reg, src_disp,
- dst_type, dst_reg, dst_disp);
- ginsn_set_where (ginsn);
+ case BFD_RELOC_386_TLS_LDM:
+ /* Check LDM access model:
- return ginsn;
-}
+ leal foo@tlsldm(%reg32), %eax --> Dest reg must be '%eax'
+ Memory reg can't be %eax and SIB
+ is not supported.
+ */
+ if (i.tm.mnem_off != MN_lea)
+ return x86_tls_error_insn;
+ if (i.index_reg)
+ return x86_tls_error_sib;
+ if (!i.base_reg)
+ return x86_tls_error_no_base_reg;
+ if (i.base_reg->reg_type.bitfield.instance == Accum)
+ return x86_tls_error_eax;
+ if (i.op[1].regs->reg_type.bitfield.instance != Accum)
+ return x86_tls_error_dest_eax;
+ if (!i.op[1].regs->reg_type.bitfield.dword)
+ return x86_tls_error_dest_32bit_reg_size;
+ break;
-/* Generate appropriate ginsn for lea.
+ case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
+ /* Check GOTPC32 TLSDESC access model:
- Unhandled sub-cases (marked with TBD_GINSN_GEN_NOT_SCFI) also suffer from
- some loss of information in the final ginsn eventually chosen (type
- GINSN_TYPE_OTHER). But this is fine for now for the GINSN_GEN_SCFI
- generation mode. */
+ --- LP64 mode ---
+ leaq x@tlsdesc(%rip), %reg64 --> Memory reg must be %rip.
-static ginsnS *
-x86_ginsn_lea (const symbolS *insn_end_sym)
-{
- offsetT src_disp = 0;
- ginsnS *ginsn = NULL;
- unsigned int src1_reg;
- const reg_entry *src1;
- offsetT index_scale;
- unsigned int dst_reg;
- bool index_regiz_p;
-
- if ((!i.base_reg) != (!i.index_reg || i.index_reg->reg_num == RegIZ))
- {
- /* lea disp(%base), %dst or lea disp(,%index,imm), %dst.
- Either index_reg or base_reg exists, but not both. Further, as per
- above, the case when just %index exists but is equal to RegIZ is
- excluded. If not excluded, a GINSN_TYPE_MOV of %rsi
- (GINSN_DW2_REGNUM_RSI_DUMMY) to %dst will be generated by this block.
- Such a mov ginsn is imprecise; so exclude it now and generate
- GINSN_TYPE_OTHER later instead, via x86_ginsn_unhandled ().
- Excluding other cases is required due to
- TBD_GINSN_REPRESENTATION_LIMIT. */
-
- index_scale = i.log2_scale_factor;
- index_regiz_p = i.index_reg && i.index_reg->reg_num == RegIZ;
- src1 = i.base_reg ? i.base_reg : i.index_reg;
- src1_reg = ginsn_dw2_regnum (src1);
- dst_reg = ginsn_dw2_regnum (i.op[1].regs);
- /* It makes sense to represent a scale factor of 1 precisely here
- (i.e., not using GINSN_TYPE_OTHER, but rather similar to the
- base-without-index case). A non-zero scale factor is still OK if
- the index reg is zero reg.
- However, skip here the case when the disp is a symbol instead.
- TBD_GINSN_REPRESENTATION_LIMIT. */
- if ((!index_scale || index_regiz_p)
- && (!i.disp_operands || i.op[0].disps->X_op == O_constant))
- {
- if (i.disp_operands)
- src_disp = i.op[0].disps->X_add_number;
-
- if (src_disp)
- /* Generate an ADD ginsn. */
- ginsn = ginsn_new_add (insn_end_sym, true,
- GINSN_SRC_REG, src1_reg, 0,
- GINSN_SRC_IMM, 0, src_disp,
- GINSN_DST_REG, dst_reg, 0);
- else
- /* Generate a MOV ginsn. */
- ginsn = ginsn_new_mov (insn_end_sym, true,
- GINSN_SRC_REG, src1_reg, 0,
- GINSN_DST_REG, dst_reg, 0);
-
- ginsn_set_where (ginsn);
- }
- }
- /* Skip handling other cases here,
- - when (i.index_reg && i.base_reg) is true,
- e.g., lea disp(%base,%index,imm), %dst
- We do not have a ginsn representation for multiply.
- - or, when (!i.index_reg && !i.base_reg) is true,
- e.g., lea symbol, %dst
- Not a frequent pattern. If %dst is a register of interest, the user is
- likely to use a MOV op anyway.
- Deal with these via the x86_ginsn_unhandled () code path to generate
- GINSN_TYPE_OTHER when necessary. TBD_GINSN_GEN_NOT_SCFI. */
-
- return ginsn;
-}
+ --- X32 mode ---
+ rex/rex2 leal x@tlsdesc(%rip), %reg32 --> Memory reg must be %rip.
-static ginsnS *
-x86_ginsn_jump (const symbolS *insn_end_sym, bool cond_p)
-{
- ginsnS *ginsn = NULL;
- const symbolS *src_symbol;
- ginsnS * (*ginsn_func) (const symbolS *sym, bool real_p,
- enum ginsn_src_type src_type, unsigned int src_reg,
- const symbolS *src_ginsn_sym);
+ In X32 mode, gas will add rex/rex2 for it later, no need to check
+ here.
+ */
+ if (i.tm.mnem_off != MN_lea)
+ return x86_tls_error_insn;
+ if (!i.base_reg)
+ return x86_tls_error_no_base_reg;
+ if (i.base_reg->reg_num != RegIP
+ || !i.base_reg->reg_type.bitfield.qword)
+ return x86_tls_error_rip;
+ if (x86_elf_abi == X86_64_ABI)
+ {
+ if (!i.op[1].regs->reg_type.bitfield.qword)
+ return x86_tls_error_dest_64bit_reg_size;
+ }
+ else if (!i.op[1].regs->reg_type.bitfield.dword
+ && !i.op[1].regs->reg_type.bitfield.qword)
+ return x86_tls_error_dest_32bit_or_64bit_reg_size;
+ break;
- gas_assert (i.disp_operands == 1);
+ case BFD_RELOC_X86_64_TLSGD:
+ /* Check GD access model:
- ginsn_func = cond_p ? ginsn_new_jump_cond : ginsn_new_jump;
- if (i.op[0].disps->X_op == O_symbol && !i.op[0].disps->X_add_number)
- {
- src_symbol = i.op[0].disps->X_add_symbol;
- ginsn = ginsn_func (insn_end_sym, true,
- GINSN_SRC_SYMBOL, 0, src_symbol);
+ leaq foo@tlsgd(%rip), %rdi --> Only this fixed format is supported.
+ */
+ case BFD_RELOC_X86_64_TLSLD:
+ /* Check LD access model:
- ginsn_set_where (ginsn);
- }
- else
- {
- /* A non-zero addend in jump/JCC target makes control-flow tracking
- difficult. Skip SCFI for now. */
- as_bad (_("SCFI: `%s' insn with non-zero addend to sym not supported"),
- cond_p ? "JCC" : "jmp");
- return ginsn;
- }
+ leaq foo@tlsld(%rip), %rdi --> Only this fixed format is supported.
+ */
+ if (i.tm.mnem_off != MN_lea)
+ return x86_tls_error_insn;
+ if (!i.base_reg)
+ return x86_tls_error_no_base_reg;
+ if (i.base_reg->reg_num != RegIP
+ || !i.base_reg->reg_type.bitfield.qword)
+ return x86_tls_error_rip;
+ if (!i.op[1].regs->reg_type.bitfield.qword
+ || i.op[1].regs->reg_num != EDI_REG_NUM
+ || i.op[1].regs->reg_flags)
+ return x86_tls_error_dest_rdi;
+ break;
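+
+ The LP64 GD/LD checks above pin the insn down completely; a sketch:
+
+	leaq	foo@tlsgd(%rip), %rdi		# accepted: the only supported form
+	leaq	foo@tlsld(%rip), %rdi		# accepted: the only supported form
+	leaq	foo@tlsgd(%rip), %rax		# rejected: dest must be %rdi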
- return ginsn;
-}
+ case BFD_RELOC_386_TLS_GOTIE:
+ /* Check GOTIE access model:
-static ginsnS *
-x86_ginsn_enter (const symbolS *insn_end_sym)
-{
- ginsnS *ginsn = NULL;
- ginsnS *ginsn_next = NULL;
- ginsnS *ginsn_last = NULL;
- /* In 64-bit mode, the default stack update size is 8 bytes. */
- int stack_opnd_size = 8;
-
- gas_assert (i.imm_operands == 2);
-
- /* For operands which are not plain constants, bail out as untraceable
- for SCFI. */
- if (i.op[0].imms->X_op != O_constant || i.op[0].imms->X_add_symbol != 0
- || i.op[1].imms->X_op != O_constant || i.op[1].imms->X_add_symbol != 0)
- {
- as_bad ("SCFI: enter insn with non-zero operand not supported");
- return ginsn;
- }
-
- /* Check if this is a 16-bit op. */
- if (ginsn_opsize_prefix_p ())
- stack_opnd_size = 2;
-
- /* If the nesting level is 0, the processor pushes the frame pointer from
- the BP/EBP/RBP register onto the stack, copies the current stack
- pointer from the SP/ESP/RSP register into the BP/EBP/RBP register, and
- loads the SP/ESP/RSP register with the current stack-pointer value
- minus the value in the size operand. */
- ginsn = ginsn_new_sub (insn_end_sym, false,
- GINSN_SRC_REG, REG_SP, 0,
- GINSN_SRC_IMM, 0, stack_opnd_size,
- GINSN_DST_REG, REG_SP, 0);
- ginsn_set_where (ginsn);
- ginsn_next = ginsn_new_store (insn_end_sym, false,
- GINSN_SRC_REG, REG_FP,
- GINSN_DST_INDIRECT, REG_SP, 0);
- ginsn_set_where (ginsn_next);
- gas_assert (!ginsn_link_next (ginsn, ginsn_next));
- ginsn_last = ginsn_new_mov (insn_end_sym, false,
- GINSN_SRC_REG, REG_SP, 0,
- GINSN_DST_REG, REG_FP, 0);
- ginsn_set_where (ginsn_last);
- gas_assert (!ginsn_link_next (ginsn_next, ginsn_last));
-
- return ginsn;
-}
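-
- A sketch of the ginsn expansion implemented above, for the 64-bit,
- zero-nesting-level case (the frame-size operand itself is not modeled):
-
-	enter	$32, $0
-	# expands (for SCFI) to:
-	#   sub   $8, %rsp		(slot for the saved frame pointer)
-	#   store %rbp, (%rsp)
-	#   mov   %rsp, %rbp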
+ subl foo@gotntpoff(%reg1), %reg2
+ movl foo@gotntpoff(%reg1), %reg2
+ addl foo@gotntpoff(%reg1), %reg2
-static ginsnS *
-x86_ginsn_leave (const symbolS *insn_end_sym)
-{
- ginsnS *ginsn = NULL;
- ginsnS *ginsn_next = NULL;
- ginsnS *ginsn_last = NULL;
- /* In 64-bit mode, the default stack update size is 8 bytes. */
- int stack_opnd_size = 8;
-
- /* Check if this is a 16-bit op. */
- if (ginsn_opsize_prefix_p ())
- stack_opnd_size = 2;
-
- /* The 'leave' instruction copies the contents of the RBP register
- into the RSP register to release all stack space allocated to the
- procedure. */
- ginsn = ginsn_new_mov (insn_end_sym, false,
- GINSN_SRC_REG, REG_FP, 0,
- GINSN_DST_REG, REG_SP, 0);
- ginsn_set_where (ginsn);
- /* Then it restores the old value of the RBP register from the stack. */
- ginsn_next = ginsn_new_load (insn_end_sym, false,
- GINSN_SRC_INDIRECT, REG_SP, 0,
- GINSN_DST_REG, REG_FP);
- ginsn_set_where (ginsn_next);
- gas_assert (!ginsn_link_next (ginsn, ginsn_next));
- ginsn_last = ginsn_new_add (insn_end_sym, false,
- GINSN_SRC_REG, REG_SP, 0,
- GINSN_SRC_IMM, 0, stack_opnd_size,
- GINSN_DST_REG, REG_SP, 0);
- ginsn_set_where (ginsn_last);
- gas_assert (!ginsn_link_next (ginsn_next, ginsn_last));
-
- return ginsn;
-}
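-
- Correspondingly, a sketch of the mapping above for the 64-bit case:
-
-	leave
-	# expands (for SCFI) to:
-	#   mov   %rbp, %rsp
-	#   load  (%rsp), %rbp
-	#   add   $8, %rsp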
+ Memory operand: SIB is not supported.
+ */
+ case BFD_RELOC_386_TLS_IE_32:
+ /* Check IE_32 access model:
-/* Check if an instruction is whitelisted.
+ subl foo@gottpoff(%reg1), %reg2
+ movl foo@gottpoff(%reg1), %reg2
+ addl foo@gottpoff(%reg1), %reg2
- Some instructions may appear with REG_SP or REG_FP as destination, because
- of which they are deemed 'interesting' for SCFI. Whitelist them here if
- they do not affect SCFI correctness. */
+ Memory operand: SIB is not supported.
+ */
+ if (i.tm.mnem_off != MN_sub
+ && i.tm.mnem_off != MN_add
+ && i.tm.mnem_off != MN_mov)
+ return x86_tls_error_insn;
+ if (i.imm_operands
+ || i.disp_operands != 1
+ || i.reg_operands != 1
+ || i.types[1].bitfield.class != Reg)
+ return x86_tls_error_opcode;
+ if (!i.base_reg)
+ return x86_tls_error_no_base_reg;
+ if (i.index_reg)
+ return x86_tls_error_sib;
+ if (!i.base_reg->reg_type.bitfield.dword)
+ return x86_tls_error_base_reg_size;
+ if (!i.op[1].regs->reg_type.bitfield.dword)
+ return x86_tls_error_dest_32bit_reg_size;
+ break;
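+
+ Sequences the GOTIE/IE_32 checks above accept and reject (sketch):
+
+	movl	foo@gotntpoff(%ebx), %eax	# accepted
+	addl	foo@gottpoff(%esi), %ecx	# accepted
+	movl	foo@gotntpoff(%ebx,%ecx,1), %eax	# rejected: SIB not supported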
-static bool
-x86_ginsn_safe_to_skip_p (void)
-{
- bool skip_p = false;
- uint16_t opcode = i.tm.base_opcode;
+ case BFD_RELOC_386_TLS_IE:
+ /* Check IE access model:
- switch (opcode)
- {
- case 0x80:
- case 0x81:
- case 0x83:
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- /* cmp imm, reg/mem. */
- if (i.tm.extension_opcode == 7)
- skip_p = true;
+ movl foo@indntpoff, %reg32 --> Mod == 00 && r/m == 5
+ addl foo@indntpoff, %reg32 --> Mod == 00 && r/m == 5
+ */
+ if (i.tm.mnem_off != MN_add && i.tm.mnem_off != MN_mov)
+ return x86_tls_error_insn;
+ if (i.imm_operands
+ || i.disp_operands != 1
+ || i.reg_operands != 1
+ || i.types[1].bitfield.class != Reg)
+ return x86_tls_error_opcode;
+ if (i.base_reg || i.index_reg)
+ return x86_tls_error_require_no_base_index_reg;
+ if (!i.op[1].regs->reg_type.bitfield.dword)
+ return x86_tls_error_dest_32bit_reg_size;
break;
- case 0x38:
- case 0x39:
- case 0x3a:
- case 0x3b:
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- /* cmp imm/reg/mem, reg/mem. */
- skip_p = true;
- break;
+ case BFD_RELOC_X86_64_GOTTPOFF:
+ /* Check GOTTPOFF access model:
- case 0xf6:
- case 0xf7:
- case 0x84:
- case 0x85:
- /* test imm/reg/mem, reg/mem. */
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- skip_p = true;
+ mov foo@gottpoff(%rip), %reg --> Memory Reg must be %rip.
+ add foo@gottpoff(%rip), %reg --> Memory Reg must be %rip.
+ add %reg1, foo@gottpoff(%rip), %reg2 --> Memory Reg must be %rip.
+ add foo@gottpoff(%rip), %reg1, %reg2 --> Memory Reg must be %rip.
+ */
+ if (i.tm.mnem_off != MN_add && i.tm.mnem_off != MN_mov)
+ return x86_tls_error_insn;
+ if (i.imm_operands
+ || i.disp_operands != 1
+ || i.types[i.operands - 1].bitfield.class != Reg)
+ return x86_tls_error_opcode;
+ if (!i.base_reg)
+ return x86_tls_error_no_base_reg;
+ if (i.base_reg->reg_num != RegIP
+ || !i.base_reg->reg_type.bitfield.qword)
+ return x86_tls_error_rip;
+ if (x86_elf_abi == X86_64_ABI)
+ {
+ if (!i.op[i.operands - 1].regs->reg_type.bitfield.qword)
+ return x86_tls_error_dest_64bit_reg_size;
+ }
+ else if (!i.op[i.operands - 1].regs->reg_type.bitfield.dword
+ && !i.op[i.operands - 1].regs->reg_type.bitfield.qword)
+ return x86_tls_error_dest_32bit_or_64bit_reg_size;
break;
- default:
- break;
- }
+ case BFD_RELOC_386_TLS_DESC_CALL:
+ /* Check GDesc access model:
- return skip_p;
-}
+ call *x@tlscall(%eax) --> Memory reg must be %eax and
+ SIB is not supported.
+ */
+ case BFD_RELOC_X86_64_TLSDESC_CALL:
+ /* Check GDesc access model:
+
+ call *x@tlscall(%rax) <--- LP64 mode.
+ call *x@tlscall(%eax) <--- X32 mode.
-#define X86_GINSN_UNHANDLED_NONE 0
-#define X86_GINSN_UNHANDLED_DEST_REG 1
-#define X86_GINSN_UNHANDLED_CFG 2
-#define X86_GINSN_UNHANDLED_STACKOP 3
-#define X86_GINSN_UNHANDLED_UNEXPECTED 4
+ Only these fixed formats are supported.
+ */
+ if (i.tm.mnem_off != MN_call)
+ return x86_tls_error_insn;
+ if (i.index_reg)
+ return x86_tls_error_sib;
+ if (!i.base_reg)
+ return x86_tls_error_no_base_reg;
+ if (i.base_reg->reg_type.bitfield.instance != Accum)
+ return x86_tls_error_RegA;
+ break;
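+
+ The GDesc call checks above thus allow only:
+
+	call	*x@tlscall(%eax)	# IA32
+	call	*x@tlscall(%rax)	# LP64
+	call	*x@tlscall(%ebx)	# rejected: base must be %eax/%rax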
-/* Check the input insn for its impact on the correctness of the synthesized
- CFI. Returns an error code to the caller. */
+ case BFD_RELOC_NONE:
+ /* This isn't a relocation. */
+ return x86_tls_error_continue;
-static int
-x86_ginsn_unhandled (void)
-{
- int err = X86_GINSN_UNHANDLED_NONE;
- const reg_entry *reg_op;
- unsigned int dw2_regnum;
-
- /* Keep an eye out for instructions affecting control flow. */
- if (i.tm.opcode_modifier.jump)
- err = X86_GINSN_UNHANDLED_CFG;
- /* Also, for any instructions involving an implicit update to the stack
- pointer. */
- else if (i.tm.opcode_modifier.operandconstraint == IMPLICIT_STACK_OP)
- err = X86_GINSN_UNHANDLED_STACKOP;
- /* Finally, also check if the missed instructions are affecting REG_SP or
- REG_FP. The destination operand is always the last one at all stages of
- assembly (as the internal representation follows the AT&T syntax operand
- order). In case of Intel syntax input, this still holds, as
- swap_operands () has been done by now.
- PS: These checks do not involve index / base reg, as indirect memory
- accesses via REG_SP or REG_FP do not affect SCFI correctness.
- (Also note these instructions are candidates for other ginsn generation
- modes in future. TBD_GINSN_GEN_NOT_SCFI.) */
- else if (i.operands && i.reg_operands
- && !(i.flags[i.operands - 1] & Operand_Mem))
- {
- reg_op = i.op[i.operands - 1].regs;
- if (reg_op)
- {
- dw2_regnum = ginsn_dw2_regnum (reg_op);
- if (dw2_regnum == REG_SP || dw2_regnum == REG_FP)
- err = X86_GINSN_UNHANDLED_DEST_REG;
- }
- else
- /* Something unexpected. Indicate to caller. */
- err = X86_GINSN_UNHANDLED_UNEXPECTED;
+ default:
+ break;
}
- return err;
+ /* This relocation is OK. */
+ return x86_tls_error_none;
}
-/* Generate one or more generic GAS instructions, a.k.a. ginsns, for the
- current machine instruction.
-
- Returns the head of the linked list of ginsn(s) added on success; returns
- NULL on failure.
-
- The input ginsn_gen_mode GMODE determines the minimal set of ginsns
- necessary for correctness of any passes applicable for that mode.
- For supporting the GINSN_GEN_SCFI generation mode, the following is the list of
- machine instructions that must be translated into the corresponding ginsns
- to ensure correctness of SCFI:
- - All instructions affecting the two registers that could potentially
- be used as the base register for CFA tracking. For SCFI, the base
- register for CFA tracking is limited to REG_SP and REG_FP only for
- now.
- - All change of flow instructions: conditional and unconditional branches,
- call and return from functions.
- - All instructions that can potentially be a register save / restore
- operation.
- - All instructions that perform stack manipulation implicitly: the CALL,
- RET, PUSH, POP, ENTER, and LEAVE instructions.
-
- The function currently supports the GINSN_GEN_SCFI generation mode only.
- Supporting other generation modes will require work on this target-specific
- process of ginsn creation:
- - Some of such places are tagged with TBD_GINSN_GEN_NOT_SCFI to serve as
- possible starting points.
- - Also note that ginsn representation may need enhancements. Specifically,
- note some TBD_GINSN_INFO_LOSS and TBD_GINSN_REPRESENTATION_LIMIT markers.
- */
-
-static ginsnS *
-x86_ginsn_new (const symbolS *insn_end_sym, enum ginsn_gen_mode gmode)
+static void
+x86_report_tls_error (enum x86_tls_error_type tls_error,
+ enum bfd_reloc_code_real r_type)
{
- int err = 0;
- uint16_t opcode;
- unsigned int dw2_regnum;
- const reg_entry *mem_reg;
- ginsnS *ginsn = NULL;
- ginsnS *ginsn_next = NULL;
- /* In 64-bit mode, the default stack update size is 8 bytes. */
- int stack_opnd_size = 8;
-
- /* Currently supports generation of selected ginsns, sufficient for
- the use-case of SCFI only. */
- if (gmode != GINSN_GEN_SCFI)
- return ginsn;
-
- opcode = i.tm.base_opcode;
-
- /* Until it is clear how to handle APX NDD and other new opcodes, disallow
- them from SCFI. */
- if (is_apx_rex2_encoding ()
- || (i.tm.opcode_modifier.evex && is_apx_evex_encoding ()))
- {
- as_bad (_("SCFI: unsupported APX op %#x may cause incorrect CFI"),
- opcode);
- return ginsn;
- }
-
- switch (opcode)
- {
-
- /* Add opcodes 0x0/0x2 and sub opcodes 0x28/0x2a (with opcode_space
- SPACE_BASE) are 8-bit ops. While they are relevant for SCFI
- correctness, skip handling them here and use the x86_ginsn_unhandled
- code path to generate GINSN_TYPE_OTHER when necessary. */
-
- case 0x1: /* add reg, reg/mem. */
- case 0x29: /* sub reg, reg/mem. */
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- ginsn = x86_ginsn_addsub_reg_mem (insn_end_sym);
- break;
-
- case 0x3: /* add reg/mem, reg. */
- case 0x2b: /* sub reg/mem, reg. */
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- ginsn = x86_ginsn_addsub_mem_reg (insn_end_sym);
- break;
-
- case 0xa0: /* push fs. */
- case 0xa8: /* push gs. */
- /* push fs / push gs have opcode_space == SPACE_0F. */
- if (i.tm.opcode_space != SPACE_0F)
- break;
- dw2_regnum = ginsn_dw2_regnum (i.op[0].regs);
- /* Check if operation size is 16-bit. */
- if (ginsn_opsize_prefix_p ())
- stack_opnd_size = 2;
- ginsn = ginsn_new_sub (insn_end_sym, false,
- GINSN_SRC_REG, REG_SP, 0,
- GINSN_SRC_IMM, 0, stack_opnd_size,
- GINSN_DST_REG, REG_SP, 0);
- ginsn_set_where (ginsn);
- ginsn_next = ginsn_new_store (insn_end_sym, false,
- GINSN_SRC_REG, dw2_regnum,
- GINSN_DST_INDIRECT, REG_SP, 0);
- ginsn_set_where (ginsn_next);
- gas_assert (!ginsn_link_next (ginsn, ginsn_next));
+ unsigned int k;
+ for (k = 0; k < ARRAY_SIZE (gotrel); k++)
+ if (gotrel[k].rel[object_64bit] == r_type)
break;
- case 0xa1: /* pop fs. */
- case 0xa9: /* pop gs. */
- /* pop fs / pop gs have opcode_space == SPACE_0F. */
- if (i.tm.opcode_space != SPACE_0F)
- break;
- dw2_regnum = ginsn_dw2_regnum (i.op[0].regs);
- /* Check if operation size is 16-bit. */
- if (ginsn_opsize_prefix_p ())
- stack_opnd_size = 2;
- ginsn = ginsn_new_load (insn_end_sym, false,
- GINSN_SRC_INDIRECT, REG_SP, 0,
- GINSN_DST_REG, dw2_regnum);
- ginsn_set_where (ginsn);
- ginsn_next = ginsn_new_add (insn_end_sym, false,
- GINSN_SRC_REG, REG_SP, 0,
- GINSN_SRC_IMM, 0, stack_opnd_size,
- GINSN_DST_REG, REG_SP, 0);
- ginsn_set_where (ginsn_next);
- gas_assert (!ginsn_link_next (ginsn, ginsn_next));
- break;
+ switch (tls_error)
+ {
+ case x86_tls_error_insn:
+ as_bad (_("@%s operator cannot be used with `%s'"),
+ gotrel[k].str, insn_name (&i.tm));
+ return;
- case 0x50 ... 0x57:
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- /* push reg. */
- dw2_regnum = ginsn_dw2_regnum (i.op[0].regs);
- /* Check if operation size is 16-bit. */
- if (ginsn_opsize_prefix_p ())
- stack_opnd_size = 2;
- ginsn = ginsn_new_sub (insn_end_sym, false,
- GINSN_SRC_REG, REG_SP, 0,
- GINSN_SRC_IMM, 0, stack_opnd_size,
- GINSN_DST_REG, REG_SP, 0);
- ginsn_set_where (ginsn);
- ginsn_next = ginsn_new_store (insn_end_sym, false,
- GINSN_SRC_REG, dw2_regnum,
- GINSN_DST_INDIRECT, REG_SP, 0);
- ginsn_set_where (ginsn_next);
- gas_assert (!ginsn_link_next (ginsn, ginsn_next));
- break;
+ case x86_tls_error_opcode:
+ as_bad (_("@%s operator can be used with `%s', but format is wrong"),
+ gotrel[k].str, insn_name (&i.tm));
+ return;
- case 0x58 ... 0x5f:
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- /* pop reg. */
- dw2_regnum = ginsn_dw2_regnum (i.op[0].regs);
- ginsn = ginsn_new_load (insn_end_sym, false,
- GINSN_SRC_INDIRECT, REG_SP, 0,
- GINSN_DST_REG, dw2_regnum);
- ginsn_set_where (ginsn);
- /* Check if operation size is 16-bit. */
- if (ginsn_opsize_prefix_p ())
- stack_opnd_size = 2;
- ginsn_next = ginsn_new_add (insn_end_sym, false,
- GINSN_SRC_REG, REG_SP, 0,
- GINSN_SRC_IMM, 0, stack_opnd_size,
- GINSN_DST_REG, REG_SP, 0);
- ginsn_set_where (ginsn_next);
- gas_assert (!ginsn_link_next (ginsn, ginsn_next));
- break;
+ case x86_tls_error_sib:
+ as_bad (_("@%s operator requires no SIB"), gotrel[k].str);
+ return;
- case 0x6a: /* push imm8. */
- case 0x68: /* push imm16/imm32. */
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- /* Check if operation size is 16-bit. */
- if (ginsn_opsize_prefix_p ())
- stack_opnd_size = 2;
- /* Skip getting the value of imm from machine instruction
- because this is not important for SCFI. */
- ginsn = ginsn_new_sub (insn_end_sym, false,
- GINSN_SRC_REG, REG_SP, 0,
- GINSN_SRC_IMM, 0, stack_opnd_size,
- GINSN_DST_REG, REG_SP, 0);
- ginsn_set_where (ginsn);
- ginsn_next = ginsn_new_store (insn_end_sym, false,
- GINSN_SRC_IMM, 0,
- GINSN_DST_INDIRECT, REG_SP, 0);
- ginsn_set_where (ginsn_next);
- gas_assert (!ginsn_link_next (ginsn, ginsn_next));
- break;
+ case x86_tls_error_no_base_reg:
+ as_bad (_("@%s operator requires base register"), gotrel[k].str);
+ return;
- /* PS: Opcodes 0x80 ... 0x8f with opcode_space SPACE_0F are present
- only after relaxation. They do not need to be handled for ginsn
- creation. */
- case 0x70 ... 0x7f:
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- ginsn = x86_ginsn_jump (insn_end_sym, true);
- break;
+ case x86_tls_error_require_no_base_index_reg:
+ as_bad (_("@%s operator requires no base/index register"),
+ gotrel[k].str);
+ return;
- case 0x80:
- case 0x81:
- case 0x83:
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- ginsn = x86_ginsn_alu_imm (insn_end_sym);
- break;
+ case x86_tls_error_base_reg:
+ as_bad (_("@%s operator requires no base register"), gotrel[k].str);
+ return;
- case 0x8a: /* mov r/m8, r8. */
- case 0x8b: /* mov r/m(16/32/64), r(16/32/64). */
- case 0x88: /* mov r8, r/m8. */
- case 0x89: /* mov r(16/32/64), r/m(16/32/64). */
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- ginsn = x86_ginsn_move (insn_end_sym);
- break;
+ case x86_tls_error_index_ebx:
+ as_bad (_("@%s operator requires `%sebx' as index register"),
+ gotrel[k].str, register_prefix);
+ return;
- case 0x8d:
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- /* lea disp(%base,%index,imm), %dst. */
- ginsn = x86_ginsn_lea (insn_end_sym);
- break;
+ case x86_tls_error_eax:
+ as_bad (_("@%s operator requires `%seax' as base register"),
+ gotrel[k].str, register_prefix);
+ return;
- case 0x8f:
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- /* pop to reg/mem. */
- if (i.mem_operands)
- {
- mem_reg = (i.base_reg) ? i.base_reg : i.index_reg;
- /* Use dummy register if no base or index. Unlike other opcodes,
- ginsns must be generated as this affects the stack pointer. */
- dw2_regnum = (mem_reg
- ? ginsn_dw2_regnum (mem_reg)
- : GINSN_DW2_REGNUM_RSI_DUMMY);
- }
- else
- dw2_regnum = ginsn_dw2_regnum (i.op[0].regs);
- ginsn = ginsn_new_load (insn_end_sym, false,
- GINSN_SRC_INDIRECT, REG_SP, 0,
- GINSN_DST_INDIRECT, dw2_regnum);
- ginsn_set_where (ginsn);
- /* Check if operation size is 16-bit. */
- if (ginsn_opsize_prefix_p ())
- stack_opnd_size = 2;
- ginsn_next = ginsn_new_add (insn_end_sym, false,
- GINSN_SRC_REG, REG_SP, 0,
- GINSN_SRC_IMM, 0, stack_opnd_size,
- GINSN_DST_REG, REG_SP, 0);
- ginsn_set_where (ginsn_next);
- gas_assert (!ginsn_link_next (ginsn, ginsn_next));
- break;
+ case x86_tls_error_RegA:
+ as_bad (_("@%s operator requires `%seax/%srax' as base register"),
+ gotrel[k].str, register_prefix, register_prefix);
+ return;
- case 0x9c:
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- /* pushf / pushfq. */
- /* Check if operation size is 16-bit. */
- if (ginsn_opsize_prefix_p ())
- stack_opnd_size = 2;
- ginsn = ginsn_new_sub (insn_end_sym, false,
- GINSN_SRC_REG, REG_SP, 0,
- GINSN_SRC_IMM, 0, stack_opnd_size,
- GINSN_DST_REG, REG_SP, 0);
- ginsn_set_where (ginsn);
- /* FIXME - hardcode the actual DWARF reg number value. For SCFI
- correctness this is simply a placeholder value; it's just clearer
- if the value is correct. */
- dw2_regnum = GINSN_DW2_REGNUM_EFLAGS;
- ginsn_next = ginsn_new_store (insn_end_sym, false,
- GINSN_SRC_REG, dw2_regnum,
- GINSN_DST_INDIRECT, REG_SP, 0);
- ginsn_set_where (ginsn_next);
- gas_assert (!ginsn_link_next (ginsn, ginsn_next));
- break;
+ case x86_tls_error_ebx:
+ as_bad (_("@%s operator requires `%sebx' as base register"),
+ gotrel[k].str, register_prefix);
+ return;
- case 0x9d:
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- /* popf / popfq. */
- /* Check if operation size is 16-bit. */
- if (ginsn_opsize_prefix_p ())
- stack_opnd_size = 2;
- /* FIXME - hardcode the actual DWARF reg number value. For SCFI
- correctness this is simply a placeholder value; it's just clearer
- if the value is correct. */
- dw2_regnum = GINSN_DW2_REGNUM_EFLAGS;
- ginsn = ginsn_new_load (insn_end_sym, false,
- GINSN_SRC_INDIRECT, REG_SP, 0,
- GINSN_DST_REG, dw2_regnum);
- ginsn_set_where (ginsn);
- ginsn_next = ginsn_new_add (insn_end_sym, false,
- GINSN_SRC_REG, REG_SP, 0,
- GINSN_SRC_IMM, 0, stack_opnd_size,
- GINSN_DST_REG, REG_SP, 0);
- ginsn_set_where (ginsn_next);
- gas_assert (!ginsn_link_next (ginsn, ginsn_next));
- break;
+ case x86_tls_error_rip:
+ as_bad (_("@%s operator requires `%srip' as base register"),
+ gotrel[k].str, register_prefix);
+ return;
- case 0xff:
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- /* push from reg/mem. */
- if (i.tm.extension_opcode == 6)
- {
- /* Check if operation size is 16-bit. */
- if (ginsn_opsize_prefix_p ())
- stack_opnd_size = 2;
- ginsn = ginsn_new_sub (insn_end_sym, false,
- GINSN_SRC_REG, REG_SP, 0,
- GINSN_SRC_IMM, 0, stack_opnd_size,
- GINSN_DST_REG, REG_SP, 0);
- ginsn_set_where (ginsn);
- if (i.mem_operands)
- {
- mem_reg = (i.base_reg) ? i.base_reg : i.index_reg;
- /* Use dummy register if no base or index. Unlike other opcodes,
- ginsns must be generated as this affects the stack pointer. */
- dw2_regnum = (mem_reg
- ? ginsn_dw2_regnum (mem_reg)
- : GINSN_DW2_REGNUM_RSI_DUMMY);
- }
- else
- dw2_regnum = ginsn_dw2_regnum (i.op[0].regs);
- ginsn_next = ginsn_new_store (insn_end_sym, false,
- GINSN_SRC_INDIRECT, dw2_regnum,
- GINSN_DST_INDIRECT, REG_SP, 0);
- ginsn_set_where (ginsn_next);
- gas_assert (!ginsn_link_next (ginsn, ginsn_next));
- }
- else if (i.tm.extension_opcode == 4)
- {
- /* jmp r/m. E.g., notrack jmp *%rax. */
- if (i.reg_operands)
- {
- dw2_regnum = ginsn_dw2_regnum (i.op[0].regs);
- ginsn = ginsn_new_jump (insn_end_sym, true,
- GINSN_SRC_REG, dw2_regnum, NULL);
- ginsn_set_where (ginsn);
- }
- else if (i.mem_operands && i.index_reg)
- {
- /* jmp *0x0(,%rax,8). */
- dw2_regnum = ginsn_dw2_regnum (i.index_reg);
- ginsn = ginsn_new_jump (insn_end_sym, true,
- GINSN_SRC_REG, dw2_regnum, NULL);
- ginsn_set_where (ginsn);
- }
- else if (i.mem_operands && i.base_reg)
- {
- dw2_regnum = ginsn_dw2_regnum (i.base_reg);
- ginsn = ginsn_new_jump (insn_end_sym, true,
- GINSN_SRC_REG, dw2_regnum, NULL);
- ginsn_set_where (ginsn);
- }
- }
- else if (i.tm.extension_opcode == 2)
- {
- /* 0xFF /2 (call). */
- if (i.reg_operands)
- {
- dw2_regnum = ginsn_dw2_regnum (i.op[0].regs);
- ginsn = ginsn_new_call (insn_end_sym, true,
- GINSN_SRC_REG, dw2_regnum, NULL);
- ginsn_set_where (ginsn);
- }
- else if (i.mem_operands && i.base_reg)
- {
- dw2_regnum = ginsn_dw2_regnum (i.base_reg);
- ginsn = ginsn_new_call (insn_end_sym, true,
- GINSN_SRC_REG, dw2_regnum, NULL);
- ginsn_set_where (ginsn);
- }
- }
- break;
+ case x86_tls_error_dest_eax:
+ as_bad (_("@%s operator requires `%seax' as dest register"),
+ gotrel[k].str, register_prefix);
+ return;
- case 0xc2: /* ret imm16. */
- case 0xc3: /* ret. */
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- /* Near ret. */
- ginsn = ginsn_new_return (insn_end_sym, true);
- ginsn_set_where (ginsn);
- break;
+ case x86_tls_error_dest_rdi:
+ as_bad (_("@%s operator requires `%srdi' as dest register"),
+ gotrel[k].str, register_prefix);
+ return;
- case 0xc8:
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- /* enter. */
- ginsn = x86_ginsn_enter (insn_end_sym);
- break;
+ case x86_tls_error_scale_factor:
+ as_bad (_("@%s operator requires scale factor of 1"),
+ gotrel[k].str);
+ return;
- case 0xc9:
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- /* leave. */
- ginsn = x86_ginsn_leave (insn_end_sym);
- break;
+ case x86_tls_error_base_reg_size:
+ as_bad (_("@%s operator requires 32-bit base register"),
+ gotrel[k].str);
+ return;
- case 0xe0 ... 0xe2: /* loop / loope / loopne. */
- case 0xe3: /* jecxz / jrcxz. */
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- ginsn = x86_ginsn_jump (insn_end_sym, true);
- ginsn_set_where (ginsn);
- break;
+ case x86_tls_error_dest_32bit_reg_size:
+ as_bad (_("@%s operator requires 32-bit dest register"),
+ gotrel[k].str);
+ return;
- case 0xe8:
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- /* PS: SCFI machinery does not care about which func is being
- called. OK to skip that info. */
- ginsn = ginsn_new_call (insn_end_sym, true,
- GINSN_SRC_SYMBOL, 0, NULL);
- ginsn_set_where (ginsn);
- break;
+ case x86_tls_error_dest_64bit_reg_size:
+ as_bad (_("@%s operator requires 64-bit dest register"),
+ gotrel[k].str);
+ return;
- /* PS: opcode 0xe9 appears only after relaxation. Skip here. */
- case 0xeb:
- /* If opcode_space != SPACE_BASE, this is not a jmp insn. Skip it
- for GINSN_GEN_SCFI. */
- if (i.tm.opcode_space != SPACE_BASE)
- break;
- /* Unconditional jmp. */
- ginsn = x86_ginsn_jump (insn_end_sym, false);
- ginsn_set_where (ginsn);
- break;
+ case x86_tls_error_dest_32bit_or_64bit_reg_size:
+ as_bad (_("@%s operator requires 32-bit or 64-bit dest register"),
+ gotrel[k].str);
+ return;
default:
- /* TBD_GINSN_GEN_NOT_SCFI: Skip all other opcodes uninteresting for
- GINSN_GEN_SCFI mode. */
- break;
- }
-
- if (!ginsn && !x86_ginsn_safe_to_skip_p ())
- {
- /* For all unhandled insns that are not whitelisted, check that they do
- not impact SCFI correctness. */
- err = x86_ginsn_unhandled ();
- switch (err)
- {
- case X86_GINSN_UNHANDLED_NONE:
- break;
- case X86_GINSN_UNHANDLED_DEST_REG:
- /* Not all writes to REG_FP are harmful in context of SCFI. Simply
- generate a GINSN_TYPE_OTHER with destination set to the
- appropriate register. The SCFI machinery will bail out if this
- ginsn affects SCFI correctness. */
- dw2_regnum = ginsn_dw2_regnum (i.op[i.operands - 1].regs);
- ginsn = ginsn_new_other (insn_end_sym, true,
- GINSN_SRC_IMM, 0,
- GINSN_SRC_IMM, 0,
- GINSN_DST_REG, dw2_regnum);
- ginsn_set_where (ginsn);
- break;
- case X86_GINSN_UNHANDLED_CFG:
- case X86_GINSN_UNHANDLED_STACKOP:
- as_bad (_("SCFI: unhandled op %#x may cause incorrect CFI"), opcode);
- break;
- case X86_GINSN_UNHANDLED_UNEXPECTED:
- as_bad (_("SCFI: unexpected op %#x may cause incorrect CFI"),
- opcode);
- break;
- default:
- abort ();
- break;
- }
+ abort ();
}
-
- return ginsn;
}
-
#endif
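
Taken together, an ill-formed TLS sequence now draws a targeted diagnostic
instead of assembling silently; e.g. (assuming `indntpoff' is the gotrel
entry corresponding to BFD_RELOC_386_TLS_IE):

	movl	foo@indntpoff(%ebx), %eax
	# error: @indntpoff operator requires no base/index register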
/* This is the guts of the machine-dependent assembler. LINE points to a
machine dependent instruction. This function is supposed to emit
the frags/bytes it assembles to. */
-void
-md_assemble (char *line)
+static void
+i386_assemble (char *line)
{
unsigned int j;
char mnemonic[MAX_MNEM_SIZE], mnem_suffix = 0, *copy = NULL;
char *xstrdup_copy = NULL;
const char *end, *pass1_mnem = NULL;
enum i386_error pass1_err = 0;
+ struct pseudo_prefixes orig_pp = pp;
const insn_template *t;
struct last_insn *last_insn
= &seg_info(now_seg)->tc_segment_info_data.last_insn;
@@ -7211,13 +6918,13 @@ md_assemble (char *line)
/* Suppress optimization when the last thing we saw may not have been
a proper instruction (e.g. a stand-alone prefix or .byte). */
if (last_insn->kind != last_insn_other)
- i.no_optimize = true;
+ pp.no_optimize = true;
/* First parse an instruction mnemonic & call i386_operand for the operands.
We assume that the scrubber has arranged it so that line[0] is the valid
start of a (possibly prefixed) mnemonic. */
- end = parse_insn (line, mnemonic, false);
+ end = parse_insn (line, mnemonic, parse_all);
if (end == NULL)
{
if (pass1_mnem != NULL)
@@ -7324,6 +7031,7 @@ md_assemble (char *line)
no_match:
pass1_err = i.error;
pass1_mnem = insn_name (current_templates.start);
+ pp = orig_pp;
goto retry;
}
@@ -7519,11 +7227,28 @@ md_assemble (char *line)
}
/* Zap the redundant prefix from XCHG when optimizing. */
- if (i.tm.base_opcode == 0x86 && optimize && !i.no_optimize)
+ if (i.tm.base_opcode == 0x86 && optimize && !pp.no_optimize)
i.prefix[LOCK_PREFIX] = 0;
}
- if (is_any_vex_encoding (&i.tm)
+#ifdef OBJ_ELF
+ if (i.has_gotrel && tls_check)
+ {
+ enum x86_tls_error_type tls_error;
+ for (j = 0; j < i.operands; ++j)
+ {
+ tls_error = x86_check_tls_relocation (i.reloc[j]);
+ if (tls_error == x86_tls_error_continue)
+ continue;
+
+ if (tls_error != x86_tls_error_none)
+ x86_report_tls_error (tls_error, i.reloc[j]);
+ break;
+ }
+ }
+#endif
+
+ if ((is_any_vex_encoding (&i.tm) && i.tm.opcode_space != SPACE_EVEXMAP4)
|| i.tm.operand_types[i.imm_operands].bitfield.class >= RegMMX
|| i.tm.operand_types[i.imm_operands + 1].bitfield.class >= RegMMX)
{
@@ -7533,30 +7258,6 @@ md_assemble (char *line)
as_bad (_("data size prefix invalid with `%s'"), insn_name (&i.tm));
return;
}
-
- /* Don't allow e.g. KMOV in TLS code sequences. */
- for (j = i.imm_operands; j < i.operands; ++j)
- switch (i.reloc[j])
- {
- case BFD_RELOC_X86_64_GOTTPOFF:
- if (i.tm.mnem_off == MN_add
- && i.tm.opcode_space == SPACE_EVEXMAP4
- && i.mem_operands == 1
- && i.base_reg
- && i.base_reg->reg_num == RegIP
- && i.tm.operand_types[0].bitfield.class == Reg
- && i.tm.operand_types[2].bitfield.class == Reg)
- /* Allow APX: add %reg1, foo@gottpoff(%rip), %reg2. */
- break;
- /* Fall through. */
- case BFD_RELOC_386_TLS_GOTIE:
- case BFD_RELOC_386_TLS_LE_32:
- case BFD_RELOC_X86_64_TLSLD:
- as_bad (_("TLS relocation cannot be used with `%s'"), insn_name (&i.tm));
- return;
- default:
- break;
- }
}
/* Check if HLE prefix is OK. */
@@ -7613,24 +7314,24 @@ md_assemble (char *line)
return;
}
- if (optimize && !i.no_optimize && i.tm.opcode_modifier.optimize)
+ if (optimize && !pp.no_optimize && i.tm.opcode_modifier.optimize)
{
- if (i.has_nf)
+ if (pp.has_nf)
optimize_nf_encoding ();
optimize_encoding ();
}
/* Past optimization there's no need to distinguish encoding_evex,
encoding_evex512, and encoding_egpr anymore. */
- if (i.encoding == encoding_evex512)
- i.encoding = encoding_evex;
- else if (i.encoding == encoding_egpr)
- i.encoding = is_any_vex_encoding (&i.tm) ? encoding_evex
+ if (pp.encoding == encoding_evex512)
+ pp.encoding = encoding_evex;
+ else if (pp.encoding == encoding_egpr)
+ pp.encoding = is_any_vex_encoding (&i.tm) ? encoding_evex
: encoding_default;
/* Similarly {nf} can now be taken to imply {evex}. */
- if (i.has_nf && i.encoding == encoding_default)
- i.encoding = encoding_evex;
+ if (pp.has_nf && pp.encoding == encoding_default)
+ pp.encoding = encoding_evex;
if (use_unaligned_vector_move)
encode_with_unaligned_vector_move ();
@@ -7703,21 +7404,29 @@ md_assemble (char *line)
}
/* Check for explicit REX prefix. */
- if (i.prefix[REX_PREFIX] || i.rex_encoding)
+ if ((i.prefix[REX_PREFIX]
+ && (i.tm.opcode_space != SPACE_EVEXMAP4
+ /* To mimic behavior for legacy insns, permit use of REX64 for promoted
+ legacy instructions. */
+ || i.prefix[REX_PREFIX] != (REX_OPCODE | REX_W)))
+ || pp.rex_encoding)
{
as_bad (_("REX prefix invalid with `%s'"), insn_name (&i.tm));
return;
}
/* Check for explicit REX2 prefix. */
- if (i.rex2_encoding)
+ if (pp.rex2_encoding)
{
as_bad (_("{rex2} prefix invalid with `%s'"), insn_name (&i.tm));
return;
}
if (is_apx_evex_encoding ())
- build_apx_evex_prefix ();
+ {
+ if (!build_apx_evex_prefix ())
+ return;
+ }
else if (i.tm.opcode_modifier.vex)
build_vex_prefix (t);
else
@@ -7757,10 +7466,10 @@ md_assemble (char *line)
/* We are ready to output the insn. */
output_insn (last_insn);
-#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
+#ifdef OBJ_ELF
/* PS: SCFI is enabled only for System V AMD64 ABI. The ABI check has been
performed in i386_target_format. */
- if (IS_ELF && flag_synth_cfi)
+ if (flag_synth_cfi)
{
ginsnS *ginsn;
ginsn = x86_ginsn_new (symbol_temp_new_now (), frch_ginsn_gen_mode ());
@@ -7780,6 +7489,14 @@ md_assemble (char *line)
last_insn->kind = last_insn_other;
}
+void
+md_assemble (char *line)
+{
+ i386_assemble (line);
+ current_templates.start = NULL;
+ memset (&pp, 0, sizeof (pp));
+}
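+
+ With pp now cleared after every statement, pseudo-prefixes are strictly
+ per-insn; e.g. under -O:
+
+	{nooptimize} lock xchgl %eax, (%edx)	# explicit LOCK byte kept
+	lock xchgl %eax, (%edx)			# pp reset: redundant LOCK zapped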
+
/* The Q suffix is generally valid only in 64-bit mode, with very few
exceptions: fild, fistp, fisttp, and cmpxchg8b. Note that for fild
and fisttp only one of their two templates is matched below: That's
@@ -7795,7 +7512,7 @@ static INLINE bool q_suffix_allowed(const insn_template *t)
}
static const char *
-parse_insn (const char *line, char *mnemonic, bool prefix_only)
+parse_insn (const char *line, char *mnemonic, enum parse_mode mode)
{
const char *l = line, *token_start = l;
char *mnem_p;
@@ -7806,13 +7523,19 @@ parse_insn (const char *line, char *mnemonic, bool prefix_only)
while (1)
{
+ const char *split;
+
mnem_p = mnemonic;
/* Pseudo-prefixes start with an opening figure brace. */
if ((*mnem_p = *l) == '{')
{
++mnem_p;
++l;
+ if (is_space_char (*l))
+ ++l;
}
+ else if (mode == parse_pseudo_prefix)
+ break;
while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
{
if (*mnem_p == '.')
@@ -7826,6 +7549,9 @@ parse_insn (const char *line, char *mnemonic, bool prefix_only)
}
l++;
}
+ split = l;
+ if (is_space_char (*l))
+ ++l;
/* Pseudo-prefixes end with a closing figure brace. */
if (*mnemonic == '{' && *l == '}')
{
@@ -7834,20 +7560,18 @@ parse_insn (const char *line, char *mnemonic, bool prefix_only)
goto too_long;
*mnem_p = '\0';
- /* Point l at the closing brace if there's no other separator. */
- if (*l != END_OF_INSN && !is_space_char (*l)
- && *l != PREFIX_SEPARATOR)
- --l;
+ if (is_space_char (*l))
+ ++l;
}
- else if (!is_space_char (*l)
+ else if (l == split
&& *l != END_OF_INSN
&& (intel_syntax
|| (*l != PREFIX_SEPARATOR && *l != ',')))
{
- if (prefix_only)
+ if (mode != parse_all)
break;
as_bad (_("invalid character %s in mnemonic"),
- output_invalid (*l));
+ output_invalid (*split));
return NULL;
}
if (token_start == l)
@@ -7863,7 +7587,6 @@ parse_insn (const char *line, char *mnemonic, bool prefix_only)
op_lookup (mnemonic);
if (*l != END_OF_INSN
- && (!is_space_char (*l) || l[1] != END_OF_INSN)
&& current_templates.start
&& current_templates.start->opcode_modifier.isprefix)
{
@@ -7904,58 +7627,58 @@ parse_insn (const char *line, char *mnemonic, bool prefix_only)
{
case Prefix_Disp8:
/* {disp8} */
- i.disp_encoding = disp_encoding_8bit;
+ pp.disp_encoding = disp_encoding_8bit;
break;
case Prefix_Disp16:
/* {disp16} */
- i.disp_encoding = disp_encoding_16bit;
+ pp.disp_encoding = disp_encoding_16bit;
break;
case Prefix_Disp32:
/* {disp32} */
- i.disp_encoding = disp_encoding_32bit;
+ pp.disp_encoding = disp_encoding_32bit;
break;
case Prefix_Load:
/* {load} */
- i.dir_encoding = dir_encoding_load;
+ pp.dir_encoding = dir_encoding_load;
break;
case Prefix_Store:
/* {store} */
- i.dir_encoding = dir_encoding_store;
+ pp.dir_encoding = dir_encoding_store;
break;
case Prefix_VEX:
/* {vex} */
- i.encoding = encoding_vex;
+ pp.encoding = encoding_vex;
break;
case Prefix_VEX3:
/* {vex3} */
- i.encoding = encoding_vex3;
+ pp.encoding = encoding_vex3;
break;
case Prefix_EVEX:
/* {evex} */
- i.encoding = encoding_evex;
+ pp.encoding = encoding_evex;
break;
case Prefix_REX:
/* {rex} */
- i.rex_encoding = true;
+ pp.rex_encoding = true;
break;
case Prefix_REX2:
/* {rex2} */
- i.rex2_encoding = true;
+ pp.rex2_encoding = true;
break;
case Prefix_NF:
/* {nf} */
- i.has_nf = true;
+ pp.has_nf = true;
break;
case Prefix_NoOptimize:
/* {nooptimize} */
- i.no_optimize = true;
+ pp.no_optimize = true;
break;
default:
abort ();
}
- if (i.has_nf
- && i.encoding != encoding_default
- && i.encoding != encoding_evex)
+ if (pp.has_nf
+ && pp.encoding != encoding_default
+ && pp.encoding != encoding_evex)
{
as_bad (_("{nf} cannot be combined with {vex}/{vex3}"));
return NULL;
@@ -7985,13 +7708,16 @@ parse_insn (const char *line, char *mnemonic, bool prefix_only)
}
}
/* Skip past PREFIX_SEPARATOR and reset token_start. */
- token_start = ++l;
+ l += (!intel_syntax && *l == PREFIX_SEPARATOR);
+ if (is_space_char (*l))
+ ++l;
+ token_start = l;
}
else
break;
}
- if (prefix_only)
+ if (mode != parse_all)
return token_start;
if (!current_templates.start)
@@ -8001,19 +7727,19 @@ parse_insn (const char *line, char *mnemonic, bool prefix_only)
encoding. */
if (mnem_p - 2 == dot_p && dot_p[1] == 's')
{
- if (i.dir_encoding == dir_encoding_default)
- i.dir_encoding = dir_encoding_swap;
+ if (pp.dir_encoding == dir_encoding_default)
+ pp.dir_encoding = dir_encoding_swap;
else
as_warn (_("ignoring `.s' suffix due to earlier `{%s}'"),
- i.dir_encoding == dir_encoding_load ? "load" : "store");
+ pp.dir_encoding == dir_encoding_load ? "load" : "store");
}
else if (mnem_p - 3 == dot_p
&& dot_p[1] == 'd'
&& dot_p[2] == '8')
{
- if (i.disp_encoding == disp_encoding_default)
- i.disp_encoding = disp_encoding_8bit;
- else if (i.disp_encoding != disp_encoding_8bit)
+ if (pp.disp_encoding == disp_encoding_default)
+ pp.disp_encoding = disp_encoding_8bit;
+ else if (pp.disp_encoding != disp_encoding_8bit)
as_warn (_("ignoring `.d8' suffix due to earlier `{disp<N>}'"));
}
else if (mnem_p - 4 == dot_p
@@ -8021,9 +7747,9 @@ parse_insn (const char *line, char *mnemonic, bool prefix_only)
&& dot_p[2] == '3'
&& dot_p[3] == '2')
{
- if (i.disp_encoding == disp_encoding_default)
- i.disp_encoding = disp_encoding_32bit;
- else if (i.disp_encoding != disp_encoding_32bit)
+ if (pp.disp_encoding == disp_encoding_default)
+ pp.disp_encoding = disp_encoding_32bit;
+ else if (pp.disp_encoding != disp_encoding_32bit)
as_warn (_("ignoring `.d32' suffix due to earlier `{disp<N>}'"));
}
else
@@ -8077,8 +7803,7 @@ parse_insn (const char *line, char *mnemonic, bool prefix_only)
}
/* For compatibility reasons accept MOVSD and CMPSD without
operands even in AT&T mode. */
- else if (*l == END_OF_INSN
- || (is_space_char (*l) && l[1] == END_OF_INSN))
+ else if (*l == END_OF_INSN)
{
mnem_p[-1] = '\0';
op_lookup (mnemonic);
@@ -8120,8 +7845,9 @@ parse_insn (const char *line, char *mnemonic, bool prefix_only)
l += length;
}
- if (current_templates.start->opcode_modifier.jump == JUMP
- || current_templates.start->opcode_modifier.jump == JUMP_BYTE)
+ if ((current_templates.start->opcode_modifier.jump == JUMP
+ || current_templates.start->opcode_modifier.jump == JUMP_BYTE)
+ && *l == ',')
{
/* Check for a branch hint. We allow ",pt" and ",pn" for
predict taken and predict not taken respectively.
@@ -8129,21 +7855,29 @@ parse_insn (const char *line, char *mnemonic, bool prefix_only)
and jcxz insns (JumpByte) for current Pentium4 chips. They
may work in the future and it doesn't hurt to accept them
now. */
- if (l[0] == ',' && l[1] == 'p')
+ token_start = l++;
+ if (is_space_char (*l))
+ ++l;
+ if (TOLOWER (*l) == 'p' && ISALPHA (l[1])
+ && (l[2] == END_OF_INSN || is_space_char (l[2])))
{
- if (l[2] == 't')
+ if (TOLOWER (l[1]) == 't')
{
if (!add_prefix (DS_PREFIX_OPCODE))
return NULL;
- l += 3;
+ l += 2;
}
- else if (l[2] == 'n')
+ else if (TOLOWER (l[1]) == 'n')
{
if (!add_prefix (CS_PREFIX_OPCODE))
return NULL;
- l += 3;
+ l += 2;
}
+ else
+ l = token_start;
}
+ else
+ l = token_start;
}
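
The reworked hint parsing above tolerates whitespace and mixed case, e.g.:

	jne,pt	.Ltarget	# predict taken: DS segment prefix
	jne, PN	.Ltarget	# also accepted now: CS segment prefix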
/* Any other comma loses. */
if (*l == ',')
@@ -8305,6 +8039,19 @@ parse_operands (char *l, const char *mnemonic)
}
static void
+copy_operand (unsigned int to, unsigned int from)
+{
+ i.types[to] = i.types[from];
+ i.tm.operand_types[to] = i.tm.operand_types[from];
+ i.flags[to] = i.flags[from];
+ i.op[to] = i.op[from];
+ i.reloc[to] = i.reloc[from];
+ i.imm_bits[to] = i.imm_bits[from];
+ /* Note: i.mask and i.broadcast aren't handled here, as what (if
+ anything) to do there depends on context. */
+}
+
+static void
swap_2_operands (unsigned int xchg1, unsigned int xchg2)
{
union i386_op temp_op;
@@ -8422,7 +8169,8 @@ optimize_imm (void)
/* A more generic (but also more involved) way of dealing
with the special case(s) would be to go look for
DefaultSize attributes on any of the templates. */
- && current_templates.start->mnem_off != MN_push))
+ && current_templates.start->mnem_off != MN_push
+ && current_templates.start->mnem_off != MN_jmpabs))
guess_suffix = LONG_MNEM_SUFFIX;
for (op = i.operands; --op >= 0;)
@@ -8568,10 +8316,10 @@ optimize_disp (const insn_template *t)
}
}
- /* Don't optimize displacement for movabs since it only takes 64bit
- displacement. */
- if (i.disp_encoding > disp_encoding_8bit
- || (flag_code == CODE_64BIT && t->mnem_off == MN_movabs))
+ /* Don't optimize displacement for movabs / jmpabs since they only take
+ 64-bit displacement. */
+ if (pp.disp_encoding > disp_encoding_8bit
+ || t->mnem_off == MN_movabs || t->mnem_off == MN_jmpabs)
return true;
for (op = i.operands; op-- > 0;)
@@ -8957,18 +8705,22 @@ check_VecOperands (const insn_template *t)
return 1;
}
- /* Non-EVEX.LIG forms need to have a ZMM register as at least one
- operand. */
- if (t->opcode_modifier.evex != EVEXLIG)
- {
- for (op = 0; op < t->operands; ++op)
- if (i.types[op].bitfield.zmmword)
- break;
- if (op >= t->operands)
- {
- i.error = operand_size_mismatch;
- return 1;
- }
+ /* Non-EVEX.{LIG,512,256} forms need to have a ZMM or YMM register as at
+ least one operand. For a YMM register or EVEX256, AVX10.2 needs to be
+ enabled. There's no need to check all operands, though: Either of the
+ last two operands will be of the right size in all relevant templates. */
+ if (t->opcode_modifier.evex != EVEXLIG
+ && t->opcode_modifier.evex != EVEX512
+ && (t->opcode_modifier.evex != EVEX256
+ || !cpu_arch_flags.bitfield.cpuavx10_2)
+ && !i.types[t->operands - 1].bitfield.zmmword
+ && !i.types[t->operands - 2].bitfield.zmmword
+ && ((!i.types[t->operands - 1].bitfield.ymmword
+ && !i.types[t->operands - 2].bitfield.ymmword)
+ || !cpu_arch_flags.bitfield.cpuavx10_2))
+ {
+ i.error = operand_size_mismatch;
+ return 1;
}
}
@@ -8979,9 +8731,9 @@ check_VecOperands (const insn_template *t)
&& (is_cpu (t, CpuAPX_F)
|| (t->opcode_modifier.sse2avx && t->opcode_modifier.evex
&& (!t->opcode_modifier.vex
- || (i.encoding != encoding_default
- && i.encoding != encoding_vex
- && i.encoding != encoding_vex3))))))
+ || (pp.encoding != encoding_default
+ && pp.encoding != encoding_vex
+ && pp.encoding != encoding_vex3))))))
{
if (i.op[0].imms->X_op != O_constant
|| !fits_in_imm4 (i.op[0].imms->X_add_number))
@@ -8999,7 +8751,7 @@ check_VecOperands (const insn_template *t)
if (t->opcode_modifier.disp8memshift
&& (!t->opcode_modifier.vex
|| need_evex_encoding (t))
- && i.disp_encoding <= disp_encoding_8bit)
+ && pp.disp_encoding <= disp_encoding_8bit)
{
if (i.broadcast.type || i.broadcast.bytes)
i.memshift = t->opcode_modifier.broadcast - 1;
@@ -9079,7 +8831,7 @@ check_VecOperands (const insn_template *t)
static int
VEX_check_encoding (const insn_template *t)
{
- if (i.encoding == encoding_error)
+ if (pp.encoding == encoding_error)
{
i.error = unsupported;
return 1;
@@ -9096,7 +8848,7 @@ VEX_check_encoding (const insn_template *t)
return 1;
}
- switch (i.encoding)
+ switch (pp.encoding)
{
case encoding_vex:
case encoding_vex3:
@@ -9109,7 +8861,7 @@ VEX_check_encoding (const insn_template *t)
break;
case encoding_default:
- if (!i.has_nf)
+ if (!pp.has_nf)
break;
/* Fall through. */
case encoding_evex:
@@ -9166,7 +8918,7 @@ check_EgprOperands (const insn_template *t)
}
/* Check if pseudo prefix {rex2} is valid. */
- if (i.rex2_encoding && !t->opcode_modifier.sse2avx)
+ if (pp.rex2_encoding && !t->opcode_modifier.sse2avx)
{
i.error = invalid_pseudo_prefix;
return true;
@@ -9222,7 +8974,7 @@ check_Rex_required (void)
return true;
/* Check pseudo prefix {rex} are valid. */
- return i.rex_encoding;
+ return pp.rex_encoding;
}
/* Optimize APX NDD insns to legacy insns. */
@@ -9231,7 +8983,7 @@ can_convert_NDD_to_legacy (const insn_template *t)
{
unsigned int match_dest_op = ~0;
- if (!i.has_nf && i.reg_operands >= 2)
+ if (!pp.has_nf && i.reg_operands >= 2)
{
unsigned int dest = i.operands - 1;
unsigned int src1 = i.operands - 2;
@@ -9323,7 +9075,7 @@ match_template (char mnem_suffix)
/* Check NF support. */
specific_error = progress (unsupported_nf);
- if (i.has_nf && !t->opcode_modifier.nf)
+ if (pp.has_nf && !t->opcode_modifier.nf)
continue;
/* Check Intel64/AMD64 ISA. */
@@ -9412,7 +9164,7 @@ match_template (char mnem_suffix)
}
/* Check if pseudo prefix {rex2} is valid. */
- if (t->opcode_modifier.noegpr && i.rex2_encoding)
+ if (t->opcode_modifier.noegpr && pp.rex2_encoding)
{
specific_error = progress (invalid_pseudo_prefix);
continue;
@@ -9486,9 +9238,9 @@ match_template (char mnem_suffix)
{store} pseudo prefix on an applicable insn. */
if (!t->opcode_modifier.modrm
&& i.reg_operands == 1
- && ((i.dir_encoding == dir_encoding_load
+ && ((pp.dir_encoding == dir_encoding_load
&& t->mnem_off != MN_pop)
- || (i.dir_encoding == dir_encoding_store
+ || (pp.dir_encoding == dir_encoding_store
&& t->mnem_off != MN_push))
/* Avoid BSWAP. */
&& t->mnem_off != MN_bswap)
@@ -9511,8 +9263,8 @@ match_template (char mnem_suffix)
/* Allow the ModR/M encoding to be requested by using the
{load} or {store} pseudo prefix. */
- if (i.dir_encoding == dir_encoding_load
- || i.dir_encoding == dir_encoding_store)
+ if (pp.dir_encoding == dir_encoding_load
+ || pp.dir_encoding == dir_encoding_store)
continue;
}
@@ -9531,7 +9283,7 @@ match_template (char mnem_suffix)
/* Allow the ModR/M encoding to be requested by using a suitable
{load} or {store} pseudo prefix. */
- if (i.dir_encoding == (i.types[0].bitfield.instance == Accum
+ if (pp.dir_encoding == (i.types[0].bitfield.instance == Accum
? dir_encoding_store
: dir_encoding_load)
&& !i.types[0].bitfield.disp64
@@ -9544,21 +9296,21 @@ match_template (char mnem_suffix)
if (!t->opcode_modifier.modrm
&& i.reg_operands == 1
&& i.imm_operands == 1
- && (i.dir_encoding == dir_encoding_load
- || i.dir_encoding == dir_encoding_store)
+ && (pp.dir_encoding == dir_encoding_load
+ || pp.dir_encoding == dir_encoding_store)
&& t->opcode_space == SPACE_BASE)
{
if (t->base_opcode == 0xb0 /* mov $imm, %reg */
- && i.dir_encoding == dir_encoding_store)
+ && pp.dir_encoding == dir_encoding_store)
continue;
if ((t->base_opcode | 0x38) == 0x3c /* <alu> $imm, %acc */
&& (t->base_opcode != 0x3c /* cmp $imm, %acc */
- || i.dir_encoding == dir_encoding_load))
+ || pp.dir_encoding == dir_encoding_load))
continue;
if (t->base_opcode == 0xa8 /* test $imm, %acc */
- && i.dir_encoding == dir_encoding_load)
+ && pp.dir_encoding == dir_encoding_load)
continue;
}
/* Fall through. */
@@ -9578,7 +9330,7 @@ match_template (char mnem_suffix)
if (t->opcode_modifier.d && i.reg_operands == i.operands
&& !operand_type_all_zero (&overlap1))
- switch (i.dir_encoding)
+ switch (pp.dir_encoding)
{
case dir_encoding_load:
if (operand_type_check (operand_types[j], anymem)
@@ -9600,8 +9352,8 @@ match_template (char mnem_suffix)
}
/* If we want store form, we skip the current load. */
- if ((i.dir_encoding == dir_encoding_store
- || i.dir_encoding == dir_encoding_swap)
+ if ((pp.dir_encoding == dir_encoding_store
+ || pp.dir_encoding == dir_encoding_swap)
&& i.mem_operands == 0
&& t->opcode_modifier.load)
continue;
@@ -9670,22 +9422,29 @@ match_template (char mnem_suffix)
goto check_operands_345;
}
else if (t->opcode_space == SPACE_EVEXMAP4
- && t->opcode_modifier.w)
+ && t->operands >= 3)
{
found_reverse_match = Opcode_D;
goto check_operands_345;
}
+ else if (t->opcode_modifier.commutative
+ /* CFCMOVcc also wants its major opcode unaltered. */
+ || (t->opcode_space == SPACE_EVEXMAP4
+ && (t->base_opcode | 0xf) == 0x4f))
+ found_reverse_match = ~0;
else if (t->opcode_space != SPACE_BASE
+ && (t->opcode_space != SPACE_EVEXMAP4
+ /* MOVBE, originating from SPACE_0F38, also
+ belongs here. */
+ || t->mnem_off == MN_movbe)
&& (t->opcode_space != SPACE_0F
/* MOV to/from CR/DR/TR, as an exception, follow
the base opcode space encoding model. */
|| (t->base_opcode | 7) != 0x27))
found_reverse_match = (t->base_opcode & 0xee) != 0x6e
? Opcode_ExtD : Opcode_SIMD_IntD;
- else if (!t->opcode_modifier.commutative)
- found_reverse_match = Opcode_D;
else
- found_reverse_match = ~0;
+ found_reverse_match = Opcode_D;
}
else
{
@@ -9815,8 +9574,8 @@ match_template (char mnem_suffix)
add %r8, %r16, %r8 -> add %r16, %r8, then rematch template.
Note that the semantics have not been changed. */
if (optimize
- && !i.no_optimize
- && i.encoding != encoding_evex
+ && !pp.no_optimize
+ && pp.encoding != encoding_evex
&& ((t + 1 < current_templates.end
&& !t[1].opcode_modifier.evex
&& t[1].opcode_space <= SPACE_0F38
@@ -9971,6 +9730,9 @@ match_template (char mnem_suffix)
/* Fall through. */
case ~0:
+ if (i.tm.opcode_space == SPACE_EVEXMAP4
+ && !t->opcode_modifier.commutative)
+ i.tm.opcode_modifier.operandconstraint = EVEX_NF;
i.tm.operand_types[0] = operand_types[i.operands - 1];
i.tm.operand_types[i.operands - 1] = operand_types[0];
break;
@@ -10490,7 +10252,7 @@ process_suffix (const insn_template *t)
? i.op[1].regs->reg_type.bitfield.word
: i.op[1].regs->reg_type.bitfield.dword)
&& ((i.base_reg == NULL && i.index_reg == NULL)
-#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
+#ifdef OBJ_ELF
|| (x86_elf_abi == X86_64_X32_ABI
&& i.base_reg
&& i.base_reg->reg_num == RegIP
@@ -10850,8 +10612,8 @@ process_operands (void)
need converting. */
i.rex |= i.prefix[REX_PREFIX] & (REX_W | REX_R | REX_X | REX_B);
i.prefix[REX_PREFIX] = 0;
- i.rex_encoding = 0;
- i.rex2_encoding = 0;
+ pp.rex_encoding = 0;
+ pp.rex2_encoding = 0;
}
/* ImmExt should be processed after SSE2AVX. */
else if (i.tm.opcode_modifier.immext)
@@ -10863,11 +10625,8 @@ process_operands (void)
number 0. */
if (i.tm.mnem_off == MN_tilezero)
{
- i.op[1].regs = i.op[0].regs;
+ copy_operand (1, 0);
i.op[0].regs -= i.op[0].regs->reg_num;
- i.types[1] = i.types[0];
- i.tm.operand_types[1] = i.tm.operand_types[0];
- i.flags[1] = i.flags[0];
i.operands++;
i.reg_operands++;
i.tm.operands++;
@@ -10905,12 +10664,7 @@ process_operands (void)
/* Add the implicit xmm0 for instructions with VEX prefix
and 3 sources. */
for (j = i.operands; j > 0; j--)
- {
- i.op[j] = i.op[j - 1];
- i.types[j] = i.types[j - 1];
- i.tm.operand_types[j] = i.tm.operand_types[j - 1];
- i.flags[j] = i.flags[j - 1];
- }
+ copy_operand (j, j - 1);
i.op[0].regs
= (const reg_entry *) str_hash_find (reg_hash, "xmm0");
i.types[0] = regxmm;
@@ -10922,10 +10676,6 @@ process_operands (void)
dupl++;
dest++;
- i.op[dupl] = i.op[dest];
- i.types[dupl] = i.types[dest];
- i.tm.operand_types[dupl] = i.tm.operand_types[dest];
- i.flags[dupl] = i.flags[dest];
}
else
{
@@ -10933,15 +10683,12 @@ process_operands (void)
i.operands++;
i.reg_operands++;
i.tm.operands++;
-
- i.op[dupl] = i.op[dest];
- i.types[dupl] = i.types[dest];
- i.tm.operand_types[dupl] = i.tm.operand_types[dest];
- i.flags[dupl] = i.flags[dest];
}
- if (i.tm.opcode_modifier.immext)
- process_immext ();
+ copy_operand (dupl, dest);
+
+ if (i.tm.opcode_modifier.immext)
+ process_immext ();
}
else if (i.tm.operand_types[0].bitfield.instance == Accum
&& i.tm.opcode_modifier.modrm)
@@ -10949,16 +10696,7 @@ process_operands (void)
unsigned int j;
for (j = 1; j < i.operands; j++)
- {
- i.op[j - 1] = i.op[j];
- i.types[j - 1] = i.types[j];
-
- /* We need to adjust fields in i.tm since they are used by
- build_modrm_byte. */
- i.tm.operand_types [j - 1] = i.tm.operand_types [j];
-
- i.flags[j - 1] = i.flags[j];
- }
+ copy_operand (j - 1, j);
/* No adjustment to i.reg_operands: This was already done at the top
of the function. */
@@ -11080,7 +10818,7 @@ process_operands (void)
if (dot_insn () && i.reg_operands == 2)
{
gas_assert (is_any_vex_encoding (&i.tm)
- || i.encoding != encoding_default);
+ || pp.encoding != encoding_default);
i.vex.register_specifier = i.op[i.operands - 1].regs;
}
}
@@ -11090,7 +10828,7 @@ process_operands (void)
== InstanceNone)
{
gas_assert (is_any_vex_encoding (&i.tm)
- || i.encoding != encoding_default);
+ || pp.encoding != encoding_default);
i.vex.register_specifier = i.op[i.operands - 1].regs;
}
@@ -11099,7 +10837,7 @@ process_operands (void)
{
if (!quiet_warnings)
as_warn (_("segment override on `%s' is ineffectual"), insn_name (&i.tm));
- if (optimize && !i.no_optimize)
+ if (optimize && !pp.no_optimize)
{
i.seg[0] = NULL;
i.prefix[SEG_PREFIX] = 0;
@@ -11198,7 +10936,7 @@ build_modrm_byte (void)
}
exp->X_add_number |= register_number (i.op[reg_slot].regs)
<< (3 + !(i.tm.opcode_modifier.evex
- || i.encoding == encoding_evex));
+ || pp.encoding == encoding_evex));
}
switch (i.tm.opcode_modifier.vexvvvv)
@@ -11353,7 +11091,7 @@ build_modrm_byte (void)
if (operand_type_check (i.types[op], disp) == 0)
{
/* fake (%bp) into 0(%bp) */
- if (i.disp_encoding == disp_encoding_16bit)
+ if (pp.disp_encoding == disp_encoding_16bit)
i.types[op].bitfield.disp16 = 1;
else
i.types[op].bitfield.disp8 = 1;
@@ -11368,10 +11106,10 @@ build_modrm_byte (void)
}
if (!fake_zero_displacement
&& !i.disp_operands
- && i.disp_encoding)
+ && pp.disp_encoding)
{
fake_zero_displacement = 1;
- if (i.disp_encoding == disp_encoding_8bit)
+ if (pp.disp_encoding == disp_encoding_8bit)
i.types[op].bitfield.disp8 = 1;
else
i.types[op].bitfield.disp16 = 1;
@@ -11400,7 +11138,7 @@ build_modrm_byte (void)
if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
{
fake_zero_displacement = 1;
- if (i.disp_encoding == disp_encoding_32bit)
+ if (pp.disp_encoding == disp_encoding_32bit)
i.types[op].bitfield.disp32 = 1;
else
i.types[op].bitfield.disp8 = 1;
@@ -11438,10 +11176,10 @@ build_modrm_byte (void)
{
if (!fake_zero_displacement
&& !i.disp_operands
- && i.disp_encoding)
+ && pp.disp_encoding)
{
fake_zero_displacement = 1;
- if (i.disp_encoding == disp_encoding_8bit)
+ if (pp.disp_encoding == disp_encoding_8bit)
i.types[op].bitfield.disp8 = 1;
else
i.types[op].bitfield.disp32 = 1;
@@ -11561,7 +11299,7 @@ output_branch (void)
}
code16 = flag_code == CODE_16BIT ? CODE16 : 0;
- size = i.disp_encoding > disp_encoding_8bit ? BIG : SMALL;
+ size = pp.disp_encoding > disp_encoding_8bit ? BIG : SMALL;
prefix = 0;
if (i.prefix[DATA_PREFIX] != 0)
@@ -11636,17 +11374,14 @@ output_branch (void)
frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
}
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+/* PLT32 relocation is ELF only. */
+#ifdef OBJ_ELF
/* Return TRUE iff PLT32 relocation should be used for branching to
symbol S. */
static bool
need_plt32_p (symbolS *s)
{
- /* PLT32 relocation is ELF only. */
- if (!IS_ELF)
- return false;
-
#ifdef TE_SOLARIS
/* Don't emit PLT32 relocation on Solaris: neither native linker nor
krtld support it. */
@@ -11758,7 +11493,7 @@ output_jump (void)
abort ();
}
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
if (flag_code == CODE_64BIT && size == 4
&& jump_reloc == NO_RELOC && i.op[0].disps->X_add_number == 0
&& need_plt32_p (i.op[0].disps->X_add_symbol))
@@ -11862,7 +11597,66 @@ output_interseg_jump (void)
i.op[0].imms, 0, reloc (2, 0, 0, i.reloc[0]));
}
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+/* Hook used to reject pseudo-prefixes that are not followed by an instruction on the same line. */
+
+void i386_start_line (void)
+{
+ struct pseudo_prefixes last_pp;
+
+ memcpy (&last_pp, &pp, sizeof (pp));
+ memset (&pp, 0, sizeof (pp));
+ if (memcmp (&pp, &last_pp, sizeof (pp)))
+ as_bad_where (frag_now->fr_file, frag_now->fr_line,
+ _("pseudo prefix without instruction"));
+}
+
+/* Hook used to warn about pseudo-prefixes ahead of a label. */
+
+bool i386_check_label (void)
+{
+ struct pseudo_prefixes last_pp;
+
+ memcpy (&last_pp, &pp, sizeof (pp));
+ memset (&pp, 0, sizeof (pp));
+ if (memcmp (&pp, &last_pp, sizeof (pp)))
+ as_warn (_("pseudo prefix ahead of label; ignoring"));
+ return true;
+}
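Both hooks rely on the same memset()/memcmp() idiom as an "any pseudo-prefix pending?" test: pp is saved, zeroed, and then compared against its saved copy. An equivalent standalone sketch (assuming <string.h>; not code from the patch):

    static bool
    pp_pending (void)
    {
      static const struct pseudo_prefixes zero_pp;  /* all-zero instance */
      return memcmp (&pp, &zero_pp, sizeof (pp)) != 0;
    }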
+
+/* Hook used to parse pseudo-prefixes off the start of a line. */
+
+int
+i386_unrecognized_line (int ch)
+{
+ char mnemonic[MAX_MNEM_SIZE];
+ const char *end;
+
+ if (ch != '{')
+ return 0;
+
+ --input_line_pointer;
+ know (*input_line_pointer == ch);
+
+ end = parse_insn (input_line_pointer, mnemonic, parse_pseudo_prefix);
+ if (end == NULL)
+ {
+ /* Diagnostic was already issued. */
+ ignore_rest_of_line ();
+ memset (&pp, 0, sizeof (pp));
+ return 1;
+ }
+
+ if (end == input_line_pointer)
+ {
+ ++input_line_pointer;
+ return 0;
+ }
+
+ input_line_pointer += end - input_line_pointer;
+ return 1;
+}
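Taken together, the three hooks give pseudo-prefixes statement scope. Hypothetical inputs illustrating the assumed behavior, based on the diagnostics above:

    {evex}          # consumed here; the next line start then reports
                    # "pseudo prefix without instruction"
    {evex} lab:     # warns "pseudo prefix ahead of label; ignoring"
    {evex} vaddps %xmm0, %xmm1, %xmm2   # normal use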
+
+#ifdef OBJ_ELF
void
x86_cleanup (void)
{
@@ -11875,7 +11669,7 @@ x86_cleanup (void)
unsigned int isa_1_descsz_raw, feature_2_descsz_raw;
unsigned int padding;
- if (!IS_ELF || !x86_used_note)
+ if (!x86_used_note)
return;
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_X86;
@@ -11982,6 +11776,9 @@ x86_cleanup (void)
subseg_set (seg, subseg);
}
+#include "tc-i386-ginsn.c"
+
+/* Whether SFrame stack trace info is supported. */
bool
x86_support_sframe_p (void)
{
@@ -11989,6 +11786,7 @@ x86_support_sframe_p (void)
return (x86_elf_abi == X86_64_ABI);
}
+/* Whether SFrame return address tracking is needed. */
bool
x86_sframe_ra_tracking_p (void)
{
@@ -11998,6 +11796,8 @@ x86_sframe_ra_tracking_p (void)
return false;
}
+/* The fixed offset from CFA for SFrame to recover the return address
+ (useful only when SFrame RA tracking is not needed). */
offsetT
x86_sframe_cfa_ra_offset (void)
{
@@ -12005,6 +11805,7 @@ x86_sframe_cfa_ra_offset (void)
return (offsetT) -8;
}
+/* The ABI/arch identifier for SFrame. */
unsigned char
x86_sframe_get_abi_arch (void)
{
@@ -12277,8 +12078,8 @@ output_insn (const struct last_insn *last_insn)
or never be used. */
enum mf_jcc_kind mf_jcc = mf_jcc_jo;
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
- if (IS_ELF && x86_used_note && now_seg != absolute_section)
+#ifdef OBJ_ELF
+ if (x86_used_note && now_seg != absolute_section)
{
if ((i.xstate & xstate_tmm) == xstate_tmm
|| is_cpu (&i.tm, CpuAMX_TILE))
@@ -12553,7 +12354,7 @@ output_insn (const struct last_insn *last_insn)
abort ();
}
-#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
+#ifdef OBJ_ELF
/* For x32, add a dummy REX_OPCODE prefix for mov/add with
R_X86_64_GOTTPOFF relocation so that linker can safely
perform IE->LE optimization. A dummy REX_OPCODE prefix
@@ -13168,10 +12969,7 @@ x86_address_bytes (void)
return stdoutput->arch_info->bits_per_address / 8;
}
-#if (!(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
- || defined (LEX_AT)) && !defined (TE_PE)
-# define lex_got(reloc, adjust, types) NULL
-#else
+#if (defined (OBJ_ELF) || defined (OBJ_MACH_O) || defined (TE_PE))
/* Parse operands of the form
<symbol>@GOTOFF+<nnn>
and similar .plt or .got references.
@@ -13191,103 +12989,9 @@ lex_got (enum bfd_reloc_code_real *rel,
we don't yet know the operand size (this will be set by insn
matching). Hence we record the word32 relocation here,
and adjust the reloc according to the real size in reloc(). */
- static const struct
- {
- const char *str;
- int len;
- const enum bfd_reloc_code_real rel[2];
- const i386_operand_type types64;
- bool need_GOT_symbol;
- }
- gotrel[] =
- {
-
-#define OPERAND_TYPE_IMM32_32S_DISP32 { .bitfield = \
- { .imm32 = 1, .imm32s = 1, .disp32 = 1 } }
-#define OPERAND_TYPE_IMM32_32S_64_DISP32 { .bitfield = \
- { .imm32 = 1, .imm32s = 1, .imm64 = 1, .disp32 = 1 } }
-#define OPERAND_TYPE_IMM32_32S_64_DISP32_64 { .bitfield = \
- { .imm32 = 1, .imm32s = 1, .imm64 = 1, .disp32 = 1, .disp64 = 1 } }
-#define OPERAND_TYPE_IMM64_DISP64 { .bitfield = \
- { .imm64 = 1, .disp64 = 1 } }
-
-#ifndef TE_PE
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
- { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32,
- BFD_RELOC_SIZE32 },
- { .bitfield = { .imm32 = 1, .imm64 = 1 } }, false },
-#endif
- { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
- BFD_RELOC_X86_64_PLTOFF64 },
- { .bitfield = { .imm64 = 1 } }, true },
- { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
- BFD_RELOC_X86_64_PLT32 },
- OPERAND_TYPE_IMM32_32S_DISP32, false },
- { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
- BFD_RELOC_X86_64_GOTPLT64 },
- OPERAND_TYPE_IMM64_DISP64, true },
- { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
- BFD_RELOC_X86_64_GOTOFF64 },
- OPERAND_TYPE_IMM64_DISP64, true },
- { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
- BFD_RELOC_X86_64_GOTPCREL },
- OPERAND_TYPE_IMM32_32S_DISP32, true },
- { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
- BFD_RELOC_X86_64_TLSGD },
- OPERAND_TYPE_IMM32_32S_DISP32, true },
- { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
- _dummy_first_bfd_reloc_code_real },
- OPERAND_TYPE_NONE, true },
- { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
- BFD_RELOC_X86_64_TLSLD },
- OPERAND_TYPE_IMM32_32S_DISP32, true },
- { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
- BFD_RELOC_X86_64_GOTTPOFF },
- OPERAND_TYPE_IMM32_32S_DISP32, true },
- { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
- BFD_RELOC_X86_64_TPOFF32 },
- OPERAND_TYPE_IMM32_32S_64_DISP32_64, true },
- { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
- _dummy_first_bfd_reloc_code_real },
- OPERAND_TYPE_NONE, true },
- { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
- BFD_RELOC_X86_64_DTPOFF32 },
- OPERAND_TYPE_IMM32_32S_64_DISP32_64, true },
- { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
- _dummy_first_bfd_reloc_code_real },
- OPERAND_TYPE_NONE, true },
- { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
- _dummy_first_bfd_reloc_code_real },
- OPERAND_TYPE_NONE, true },
- { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
- BFD_RELOC_X86_64_GOT32 },
- OPERAND_TYPE_IMM32_32S_64_DISP32, true },
- { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
- BFD_RELOC_X86_64_GOTPC32_TLSDESC },
- OPERAND_TYPE_IMM32_32S_DISP32, true },
- { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
- BFD_RELOC_X86_64_TLSDESC_CALL },
- OPERAND_TYPE_IMM32_32S_DISP32, true },
-#else /* TE_PE */
- { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
- BFD_RELOC_32_SECREL },
- OPERAND_TYPE_IMM32_32S_64_DISP32_64, false },
-#endif
-
-#undef OPERAND_TYPE_IMM32_32S_DISP32
-#undef OPERAND_TYPE_IMM32_32S_64_DISP32
-#undef OPERAND_TYPE_IMM32_32S_64_DISP32_64
-#undef OPERAND_TYPE_IMM64_DISP64
-
- };
char *cp;
unsigned int j;
-#if defined (OBJ_MAYBE_ELF) && !defined (TE_PE)
- if (!IS_ELF)
- return NULL;
-#endif
-
for (cp = input_line_pointer; *cp != '@'; cp++)
if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
return NULL;
@@ -13302,6 +13006,7 @@ lex_got (enum bfd_reloc_code_real *rel,
int first, second;
char *tmpbuf, *past_reloc;
+ i.has_gotrel = true;
*rel = gotrel[j].rel[object_64bit];
if (types)
@@ -13357,6 +13062,8 @@ lex_got (enum bfd_reloc_code_real *rel,
/* Might be a symbol version string. Don't as_bad here. */
return NULL;
}
+#else
+# define lex_got(reloc, adjust, types) NULL
#endif
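For reference, lex_got() is what maps an operand suffix to its relocation (and, with this change, also sets i.has_gotrel). A typical 64-bit example; per the gotrel table the suffix is initially recorded as BFD_RELOC_X86_64_GOTPCREL and adjusted to the real operand size later in reloc():

    movq foo@GOTPCREL(%rip), %rax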
bfd_reloc_code_real_type
@@ -13368,9 +13075,7 @@ x86_cons (expressionS *exp, int size)
exp->X_md = 0;
expr_mode = expr_operator_none;
-#if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
- && !defined (LEX_AT)) \
- || defined (TE_PE)
+#if defined (OBJ_ELF) || defined (TE_PE)
if (size == 4 || (object_64bit && size == 8))
{
/* Handle @GOTOFF and the like in an expression. */
@@ -13458,13 +13163,14 @@ s_insn (int dummy ATTRIBUTE_UNUSED)
saved_char = *saved_ilp;
*saved_ilp = 0;
- end = parse_insn (line, mnemonic, true);
+ end = parse_insn (line, mnemonic, parse_prefix);
if (end == NULL)
{
bad:
*saved_ilp = saved_char;
ignore_rest_of_line ();
i.tm.mnem_off = 0;
+ memset (&pp, 0, sizeof (pp));
return;
}
line += end - line;
@@ -13503,20 +13209,20 @@ s_insn (int dummy ATTRIBUTE_UNUSED)
}
if (vex || xop
- ? i.encoding == encoding_evex
+ ? pp.encoding == encoding_evex
: evex
- ? i.encoding == encoding_vex
- || i.encoding == encoding_vex3
- : i.encoding != encoding_default)
+ ? pp.encoding == encoding_vex
+ || pp.encoding == encoding_vex3
+ : pp.encoding != encoding_default)
{
as_bad (_("pseudo-prefix conflicts with encoding specifier"));
goto bad;
}
- if (line > end && i.encoding == encoding_default)
- i.encoding = evex ? encoding_evex : encoding_vex;
+ if (line > end && pp.encoding == encoding_default)
+ pp.encoding = evex ? encoding_evex : encoding_vex;
- if (i.encoding != encoding_default)
+ if (pp.encoding != encoding_default)
{
/* Only address size and segment override prefixes are permitted with
VEX/XOP/EVEX encodings. */
@@ -13811,8 +13517,8 @@ s_insn (int dummy ATTRIBUTE_UNUSED)
}
/* No need to distinguish encoding_evex and encoding_evex512. */
- if (i.encoding == encoding_evex512)
- i.encoding = encoding_evex;
+ if (pp.encoding == encoding_evex512)
+ pp.encoding = encoding_evex;
}
/* Trim off encoding space. */
@@ -13821,8 +13527,8 @@ s_insn (int dummy ATTRIBUTE_UNUSED)
uint8_t byte = val >> ((--j - 1) * 8);
i.insn_opcode_space = SPACE_0F;
- switch (byte & -(j > 1 && !i.rex2_encoding
- && (i.encoding != encoding_egpr || evex)))
+ switch (byte & -(j > 1 && !pp.rex2_encoding
+ && (pp.encoding != encoding_egpr || evex)))
{
case 0x38:
i.insn_opcode_space = SPACE_0F38;
@@ -13856,7 +13562,7 @@ s_insn (int dummy ATTRIBUTE_UNUSED)
expressionS *disp_exp = NULL;
bool changed;
- if (i.encoding == encoding_egpr)
+ if (pp.encoding == encoding_egpr)
{
if (vex || xop)
{
@@ -13864,21 +13570,21 @@ s_insn (int dummy ATTRIBUTE_UNUSED)
goto done;
}
if (evex)
- i.encoding = encoding_evex;
+ pp.encoding = encoding_evex;
else
- i.encoding = encoding_default;
+ pp.encoding = encoding_default;
}
/* Are we to emit ModR/M encoding? */
if (!i.short_form
&& (i.mem_operands
- || i.reg_operands > (i.encoding != encoding_default)
+ || i.reg_operands > (pp.encoding != encoding_default)
|| i.tm.extension_opcode != None))
i.tm.opcode_modifier.modrm = 1;
if (!i.tm.opcode_modifier.modrm
&& (i.reg_operands
- > i.short_form + 0U + (i.encoding != encoding_default)
+ > i.short_form + 0U + (pp.encoding != encoding_default)
|| i.mem_operands))
{
as_bad (_("too many register/memory operands"));
@@ -13917,9 +13623,10 @@ s_insn (int dummy ATTRIBUTE_UNUSED)
}
/* Fall through. */
case 3:
- if (i.encoding != encoding_default)
+ if (pp.encoding != encoding_default)
{
- i.tm.opcode_modifier.vexvvvv = VexVVVV_SRC1;
+ i.tm.opcode_modifier.vexvvvv = i.tm.extension_opcode == None
+ ? VexVVVV_SRC1 : VexVVVV_DST;
break;
}
/* Fall through. */
@@ -13973,13 +13680,13 @@ s_insn (int dummy ATTRIBUTE_UNUSED)
|| i.index_reg->reg_type.bitfield.ymmword
|| i.index_reg->reg_type.bitfield.zmmword))
{
- if (i.encoding == encoding_default)
+ if (pp.encoding == encoding_default)
{
as_bad (_("VSIB unavailable with legacy encoding"));
goto done;
}
- if (i.encoding == encoding_evex
+ if (pp.encoding == encoding_evex
&& i.reg_operands > 1)
{
/* We could allow two register operands, encoding the 2nd one in
@@ -13999,11 +13706,11 @@ s_insn (int dummy ATTRIBUTE_UNUSED)
for (j = i.imm_operands; j < i.operands; ++j)
{
/* Look for 8-bit operands that use old registers. */
- if (i.encoding != encoding_default
+ if (pp.encoding != encoding_default
&& flag_code == CODE_64BIT
&& i.types[j].bitfield.class == Reg
&& i.types[j].bitfield.byte
- && !(i.op[j].regs->reg_flags & RegRex64)
+ && !(i.op[j].regs->reg_flags & (RegRex | RegRex2 | RegRex64))
&& i.op[j].regs->reg_num > 3)
as_bad (_("can't encode register '%s%s' with VEX/XOP/EVEX"),
register_prefix, i.op[j].regs->reg_name);
@@ -14058,7 +13765,7 @@ s_insn (int dummy ATTRIBUTE_UNUSED)
case 4: combined.bitfield.dword = 1; break;
}
- if (i.encoding == encoding_default)
+ if (pp.encoding == encoding_default)
{
if (flag_code == CODE_64BIT && combined.bitfield.qword)
i.rex |= REX_W;
@@ -14123,11 +13830,11 @@ s_insn (int dummy ATTRIBUTE_UNUSED)
++i.memshift;
else if (disp_exp != NULL && disp_exp->X_op == O_constant
&& disp_exp->X_add_number != 0
- && i.disp_encoding != disp_encoding_32bit)
+ && pp.disp_encoding != disp_encoding_32bit)
{
if (!quiet_warnings)
as_warn ("cannot determine memory operand size");
- i.disp_encoding = disp_encoding_32bit;
+ pp.disp_encoding = disp_encoding_32bit;
}
}
}
@@ -14135,7 +13842,7 @@ s_insn (int dummy ATTRIBUTE_UNUSED)
if (i.memshift >= 32)
i.memshift = 0;
else if (!evex)
- i.encoding = encoding_error;
+ pp.encoding = encoding_error;
if (i.disp_operands && !optimize_disp (&i.tm))
goto done;
@@ -14201,8 +13908,8 @@ s_insn (int dummy ATTRIBUTE_UNUSED)
potential special casing there. */
i.tm.base_opcode |= val;
- if (i.encoding == encoding_error
- || (i.encoding != encoding_evex
+ if (pp.encoding == encoding_error
+ || (pp.encoding != encoding_evex
? i.broadcast.type || i.broadcast.bytes
|| i.rounding.type != rc_none
|| i.mask.reg
@@ -14239,10 +13946,10 @@ s_insn (int dummy ATTRIBUTE_UNUSED)
last_insn->name = ".insn directive";
last_insn->file = as_where (&last_insn->line);
-#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
+#ifdef OBJ_ELF
/* PS: SCFI is enabled only for System V AMD64 ABI. The ABI check has been
performed in i386_target_format. */
- if (IS_ELF && flag_synth_cfi)
+ if (flag_synth_cfi)
as_bad (_("SCFI: hand-crafting instructions not supported"));
#endif
@@ -14254,6 +13961,9 @@ s_insn (int dummy ATTRIBUTE_UNUSED)
/* Make sure dot_insn() won't yield "true" anymore. */
i.tm.mnem_off = 0;
+
+ current_templates.start = NULL;
+ memset (&pp, 0, sizeof (pp));
}
#ifdef TE_PE
@@ -14313,11 +14023,11 @@ RC_SAE_specifier (const char *pstr)
return NULL;
}
- switch (i.encoding)
+ switch (pp.encoding)
{
case encoding_default:
case encoding_egpr:
- i.encoding = encoding_evex512;
+ pp.encoding = encoding_evex512;
break;
case encoding_evex:
case encoding_evex512:
@@ -14350,6 +14060,8 @@ check_VecOperations (char *op_string)
if (*op_string == '{')
{
op_string++;
+ if (is_space_char (*op_string))
+ op_string++;
/* Check broadcasts. */
if (startswith (op_string, "1to"))
@@ -14385,11 +14097,11 @@ check_VecOperations (char *op_string)
}
op_string++;
- switch (i.encoding)
+ switch (pp.encoding)
{
case encoding_default:
case encoding_egpr:
- i.encoding = encoding_evex;
+ pp.encoding = encoding_evex;
break;
case encoding_evex:
case encoding_evex512:
@@ -14520,6 +14232,8 @@ check_VecOperations (char *op_string)
else
goto unknown_vec_op;
+ if (is_space_char (*op_string))
+ op_string++;
if (*op_string != '}')
{
as_bad (_("missing `}' in `%s'"), saved);
@@ -14527,8 +14241,6 @@ check_VecOperations (char *op_string)
}
op_string++;
- /* Strip whitespace since the addition of pseudo prefixes
- changed how the scrubber treats '{'. */
if (is_space_char (*op_string))
++op_string;
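With the added is_space_char() checks, blanks are now tolerated just inside the braces of vector operations. Assuming AVX512VL is enabled, both spellings below should be equivalent:

    vaddps (%rax){1to8}, %ymm1, %ymm2
    vaddps (%rax){ 1to8 }, %ymm1, %ymm2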
@@ -14643,9 +14355,8 @@ i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
&& flag_code != CODE_64BIT && !object_64bit)
exp->X_add_number = extend_to_32bit_address (exp->X_add_number);
}
-#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
- else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
- && exp_seg != absolute_section
+#ifdef OBJ_AOUT
+ else if (exp_seg != absolute_section
&& exp_seg != text_section
&& exp_seg != data_section
&& exp_seg != bss_section
@@ -14932,9 +14643,8 @@ i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
exp->X_add_number = extend_to_32bit_address (exp->X_add_number);
}
-#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
- else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
- && exp_seg != absolute_section
+#ifdef OBJ_AOUT
+ else if (exp_seg != absolute_section
&& exp_seg != text_section
&& exp_seg != data_section
&& exp_seg != bss_section
@@ -15115,7 +14825,7 @@ i386_index_check (const char *operand_string)
if (addr_mode != CODE_16BIT)
{
/* 32-bit/64-bit checks. */
- if (i.disp_encoding == disp_encoding_16bit)
+ if (pp.disp_encoding == disp_encoding_16bit)
{
bad_disp:
as_bad (_("invalid `%s' prefix"),
@@ -15161,7 +14871,7 @@ i386_index_check (const char *operand_string)
else
{
/* 16-bit checks. */
- if (i.disp_encoding == disp_encoding_32bit)
+ if (pp.disp_encoding == disp_encoding_32bit)
goto bad_disp;
if ((i.base_reg
@@ -15190,10 +14900,17 @@ RC_SAE_immediate (const char *imm_start)
if (*pstr != '{')
return 0;
- pstr = RC_SAE_specifier (pstr + 1);
+ pstr++;
+ if (is_space_char (*pstr))
+ pstr++;
+
+ pstr = RC_SAE_specifier (pstr);
if (pstr == NULL)
return 0;
+ if (is_space_char (*pstr))
+ pstr++;
+
if (*pstr++ != '}')
{
as_bad (_("Missing '}': '%s'"), imm_start);
@@ -15608,7 +15325,7 @@ i386_frag_max_var (fragS *frag)
return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
}
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
static int
elf_symbol_resolved_in_segment_p (symbolS *fr_symbol, offsetT fr_var)
{
@@ -16066,14 +15783,12 @@ md_estimate_size_before_relax (fragS *fragP, segT segment)
an externally visible symbol, because it may be overridden by a
shared library. */
if (S_GET_SEGMENT (fragP->fr_symbol) != segment
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
- || (IS_ELF
- && !elf_symbol_resolved_in_segment_p (fragP->fr_symbol,
- fragP->fr_var))
+#ifdef OBJ_ELF
+ || !elf_symbol_resolved_in_segment_p (fragP->fr_symbol,
+ fragP->fr_var)
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
- || (OUTPUT_FLAVOR == bfd_target_coff_flavour
- && S_IS_WEAK (fragP->fr_symbol))
+ || S_IS_WEAK (fragP->fr_symbol)
#endif
)
{
@@ -16089,7 +15804,7 @@ md_estimate_size_before_relax (fragS *fragP, segT segment)
reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
else if (size == 2)
reloc_type = BFD_RELOC_16_PCREL;
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
else if (fragP->tc_frag_data.code == CODE_64BIT
&& fragP->fr_offset == 0
&& need_plt32_p (fragP->fr_symbol))
@@ -16440,30 +16155,22 @@ md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
This covers for the fact that bfd_install_relocation will
subtract the current location (for partial_inplace, PC relative
relocations); see more below. */
-#ifndef OBJ_AOUT
- if (IS_ELF
-#ifdef TE_PE
- || OUTPUT_FLAVOR == bfd_target_coff_flavour
-#endif
- )
- value += fixP->fx_where + fixP->fx_frag->fr_address;
+#if defined (OBJ_ELF) || defined (TE_PE)
+ value += fixP->fx_where + fixP->fx_frag->fr_address;
#endif
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
- if (IS_ELF)
- {
- segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
+#ifdef OBJ_ELF
+ segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
- if ((sym_seg == seg
- || (symbol_section_p (fixP->fx_addsy)
- && sym_seg != absolute_section))
- && !generic_force_reloc (fixP))
- {
- /* Yes, we add the values in twice. This is because
- bfd_install_relocation subtracts them out again. I think
- bfd_install_relocation is broken, but I don't dare change
- it. FIXME. */
- value += fixP->fx_where + fixP->fx_frag->fr_address;
- }
+ if ((sym_seg == seg
+ || (symbol_section_p (fixP->fx_addsy)
+ && sym_seg != absolute_section))
+ && !generic_force_reloc (fixP))
+ {
+ /* Yes, we add the values in twice. This is because
+ bfd_install_relocation subtracts them out again. I think
+ bfd_install_relocation is broken, but I don't dare change
+ it. FIXME. */
+ value += fixP->fx_where + fixP->fx_frag->fr_address;
}
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
@@ -16496,8 +16203,8 @@ md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
/* Fix a few things - the dynamic linker expects certain values here,
and we must not disappoint it. */
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
- if (IS_ELF && fixP->fx_addsy)
+#ifdef OBJ_ELF
+ if (fixP->fx_addsy)
switch (fixP->fx_r_type)
{
case BFD_RELOC_386_PLT32:
@@ -16549,7 +16256,7 @@ md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
default:
break;
}
-#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
+#endif /* OBJ_ELF */
/* If not 64bit, massage value, to account for wraparound when !BFD64. */
if (!object_64bit)
@@ -16664,17 +16371,18 @@ static bool check_register (const reg_entry *r)
if (vector_size < VSZ512)
return false;
- switch (i.encoding)
+ /* Don't update pp when not dealing with insn operands. */
+ switch (current_templates.start ? pp.encoding : encoding_evex)
{
case encoding_default:
case encoding_egpr:
- i.encoding = encoding_evex512;
+ pp.encoding = encoding_evex512;
break;
case encoding_evex:
case encoding_evex512:
break;
default:
- i.encoding = encoding_error;
+ pp.encoding = encoding_error;
break;
}
}
@@ -16702,17 +16410,18 @@ static bool check_register (const reg_entry *r)
|| flag_code != CODE_64BIT)
return false;
- switch (i.encoding)
+ /* Don't update pp when not dealing with insn operands. */
+ switch (current_templates.start ? pp.encoding : encoding_evex)
{
case encoding_default:
case encoding_egpr:
case encoding_evex512:
- i.encoding = encoding_evex;
+ pp.encoding = encoding_evex;
break;
case encoding_evex:
break;
default:
- i.encoding = encoding_error;
+ pp.encoding = encoding_error;
break;
}
}
@@ -16723,17 +16432,18 @@ static bool check_register (const reg_entry *r)
|| flag_code != CODE_64BIT)
return false;
- switch (i.encoding)
+ /* Don't update pp when not dealing with insn operands. */
+ switch (current_templates.start ? pp.encoding : encoding_egpr)
{
case encoding_default:
- i.encoding = encoding_egpr;
+ pp.encoding = encoding_egpr;
break;
case encoding_egpr:
case encoding_evex:
case encoding_evex512:
break;
default:
- i.encoding = encoding_error;
+ pp.encoding = encoding_error;
break;
}
}
@@ -16977,10 +16687,10 @@ bool i386_record_operator (operatorT op,
}
#endif
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
-const char *md_shortopts = "kVQ:sqnO::";
+#ifdef OBJ_ELF
+const char md_shortopts[] = "kVQ:sqnO::";
#else
-const char *md_shortopts = "qnO::";
+const char md_shortopts[] = "qnO::";
#endif
#define OPTION_32 (OPTION_MD_BASE + 0)
@@ -17018,16 +16728,19 @@ const char *md_shortopts = "qnO::";
#define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32)
#define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33)
#define OPTION_MUSE_UNALIGNED_VECTOR_MOVE (OPTION_MD_BASE + 34)
+#define OPTION_MTLS_CHECK (OPTION_MD_BASE + 35)
-struct option md_longopts[] =
+const struct option md_longopts[] =
{
{"32", no_argument, NULL, OPTION_32},
-#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
- || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
+#if (defined (OBJ_ELF) || defined (TE_PE) || defined (OBJ_MACH_O)) \
+ && defined (BFD64)
{"64", no_argument, NULL, OPTION_64},
#endif
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
+# ifdef BFD64
{"x32", no_argument, NULL, OPTION_X32},
+# endif
{"mshared", no_argument, NULL, OPTION_MSHARED},
{"mx86-used-note", required_argument, NULL, OPTION_X86_USED_NOTE},
#endif
@@ -17064,9 +16777,10 @@ struct option md_longopts[] =
{"mlfence-before-ret", required_argument, NULL, OPTION_MLFENCE_BEFORE_RET},
{"mamd64", no_argument, NULL, OPTION_MAMD64},
{"mintel64", no_argument, NULL, OPTION_MINTEL64},
+ {"mtls-check", required_argument, NULL, OPTION_MTLS_CHECK},
{NULL, no_argument, NULL, 0}
};
-size_t md_longopts_size = sizeof (md_longopts);
+const size_t md_longopts_size = sizeof (md_longopts);
int
md_parse_option (int c, const char *arg)
@@ -17084,7 +16798,7 @@ md_parse_option (int c, const char *arg)
quiet_warnings = 1;
break;
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
/* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
should be emitted or not. FIXME: Not implemented. */
case 'Q':
@@ -17118,11 +16832,11 @@ md_parse_option (int c, const char *arg)
else
as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg);
break;
+#endif
+#ifdef BFD64
-#endif
-#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
- || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
+#if (defined (OBJ_ELF) || defined (TE_PE) || defined (OBJ_MACH_O))
case OPTION_64:
{
const char **list, **l;
@@ -17145,28 +16859,27 @@ md_parse_option (int c, const char *arg)
break;
#endif
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
case OPTION_X32:
- if (IS_ELF)
- {
- const char **list, **l;
+ {
+ const char **list, **l;
- list = bfd_target_list ();
- for (l = list; *l != NULL; l++)
- if (startswith (*l, "elf32-x86-64"))
- {
- default_arch = "x86_64:32";
- break;
- }
- if (*l == NULL)
- as_fatal (_("no compiled in support for 32bit x86_64"));
- free (list);
- }
- else
- as_fatal (_("32bit x86_64 is only supported for ELF"));
+ list = bfd_target_list ();
+ for (l = list; *l != NULL; l++)
+ if (startswith (*l, "elf32-x86-64"))
+ {
+ default_arch = "x86_64:32";
+ break;
+ }
+ if (*l == NULL)
+ as_fatal (_("no compiled in support for 32bit x86_64"));
+ free (list);
+ }
break;
#endif
+#endif /* BFD64 */
+
case OPTION_32:
{
const char **list, **l;
@@ -17620,6 +17333,14 @@ md_parse_option (int c, const char *arg)
optimize_for_space = 0;
}
break;
+ case OPTION_MTLS_CHECK:
+ if (strcasecmp (arg, "yes") == 0)
+ tls_check = true;
+ else if (strcasecmp (arg, "no") == 0)
+ tls_check = false;
+ else
+ as_fatal (_("invalid -mtls-check= option: `%s'"), arg);
+ break;
default:
return 0;
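The argument to the new option is matched case-insensitively, and anything other than yes/no is fatal. A hypothetical invocation:

    as -mtls-check=no foo.s    # assemble without TLS relocation checking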
@@ -17743,7 +17464,7 @@ show_arch (FILE *stream, int ext, int check)
void
md_show_usage (FILE *stream)
{
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
fprintf (stream, _("\
-Qy, -Qn ignored\n\
-V print assembler version number\n\
@@ -17753,12 +17474,12 @@ md_show_usage (FILE *stream)
-n do not optimize code alignment\n\
-O{012s} attempt some code optimizations\n\
-q quieten some warnings\n"));
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
fprintf (stream, _("\
-s ignored\n"));
#endif
#ifdef BFD64
-# if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+# ifdef OBJ_ELF
fprintf (stream, _("\
--32/--64/--x32 generate 32bit/64bit/x32 object\n"));
# elif defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O)
@@ -17831,7 +17552,7 @@ md_show_usage (FILE *stream)
-mnaked-reg don't require `%%' prefix for registers\n"));
fprintf (stream, _("\
-madd-bnd-prefix add BND prefix for all valid branches\n"));
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
fprintf (stream, _("\
-mshared disable branch optimization for shared code\n"));
fprintf (stream, _("\
@@ -17862,6 +17583,16 @@ md_show_usage (FILE *stream)
fprintf (stream, _("(default: no)\n"));
fprintf (stream, _("\
generate relax relocations\n"));
+#ifdef OBJ_ELF
+ fprintf (stream, _("\
+ -mtls-check=[no|yes] "));
+ if (DEFAULT_X86_TLS_CHECK)
+ fprintf (stream, _("(default: yes)\n"));
+ else
+ fprintf (stream, _("(default: no)\n"));
+ fprintf (stream, _("\
+ check TLS relocation\n"));
+#endif
fprintf (stream, _("\
-malign-branch-boundary=NUM (default: 0)\n\
align branches within NUM byte boundary\n"));
@@ -17891,9 +17622,7 @@ md_show_usage (FILE *stream)
-mintel64 accept only Intel64 ISA\n"));
}
-#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
- || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
- || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
+#if (defined (OBJ_ELF) || defined (TE_PE) || defined (OBJ_MACH_O))
/* Pick the target format to use. */
@@ -17903,7 +17632,7 @@ i386_target_format (void)
if (startswith (default_arch, "x86_64"))
{
update_code_flag (CODE_64BIT, 1);
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
if (default_arch[6] == '\0')
x86_elf_abi = X86_64_ABI;
else
@@ -17934,8 +17663,8 @@ i386_target_format (void)
else
as_fatal (_("unknown architecture"));
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
- if (IS_ELF && flag_synth_cfi && x86_elf_abi != X86_64_ABI)
+#ifdef OBJ_ELF
+ if (flag_synth_cfi && x86_elf_abi != X86_64_ABI)
as_fatal (_("SCFI is not supported for this ABI"));
#endif
@@ -17944,12 +17673,7 @@ i386_target_format (void)
switch (OUTPUT_FLAVOR)
{
-#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
- case bfd_target_aout_flavour:
- return AOUT_TARGET_FORMAT;
-#endif
-#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
-# if defined (TE_PE) || defined (TE_PEP)
+#ifdef TE_PE
case bfd_target_coff_flavour:
if (flag_code == CODE_64BIT)
{
@@ -17957,15 +17681,8 @@ i386_target_format (void)
return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
}
return use_big_obj ? "pe-bigobj-i386" : "pe-i386";
-# elif defined (TE_GO32)
- case bfd_target_coff_flavour:
- return "coff-go32";
-# else
- case bfd_target_coff_flavour:
- return "coff-i386";
-# endif
#endif
-#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
+#ifdef OBJ_ELF
case bfd_target_elf_flavour:
{
const char *format;
@@ -18023,7 +17740,7 @@ i386_target_format (void)
}
}
-#endif /* OBJ_MAYBE_ more than one */
+#endif /* ELF / PE / MACH_O */
symbolS *
md_undefined_symbol (char *name)
@@ -18045,26 +17762,20 @@ md_undefined_symbol (char *name)
return 0;
}
-#if defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT)
+#ifdef OBJ_AOUT
/* Round up a section size to the appropriate boundary. */
valueT
md_section_align (segT segment, valueT size)
{
- if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
- {
- /* For a.out, force the section size to be aligned. If we don't do
- this, BFD will align it for us, but it will not write out the
- final bytes of the section. This may be a bug in BFD, but it is
- easier to fix it here since that is how the other a.out targets
- work. */
- int align;
-
- align = bfd_section_alignment (segment);
- size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
- }
-
- return size;
+ /* For a.out, force the section size to be aligned. If we don't do
+ this, BFD will align it for us, but it will not write out the
+ final bytes of the section. This may be a bug in BFD, but it is
+ easier to fix it here since that is how the other a.out targets
+ work. */
+ int align = bfd_section_alignment (segment);
+
+ return ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
}
#endif
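A quick check of the rounding expression: with align = 4 (i.e. a 16-byte boundary), a 100-byte a.out section becomes (100 + 15) & ~15 = 112 bytes.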
@@ -18122,10 +17833,10 @@ i386_validate_fix (fixS *fixp)
return 0;
}
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
if (fixp->fx_r_type == BFD_RELOC_SIZE32
|| fixp->fx_r_type == BFD_RELOC_SIZE64)
- return IS_ELF && fixp->fx_addsy
+ return fixp->fx_addsy
&& (!S_IS_DEFINED (fixp->fx_addsy)
|| S_IS_EXTERNAL (fixp->fx_addsy));
@@ -18160,7 +17871,7 @@ i386_validate_fix (fixS *fixp)
{
if (!object_64bit)
abort ();
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
if (fixp->fx_tcbit)
fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCRELX;
else if (fixp->fx_tcbit2)
@@ -18181,13 +17892,17 @@ i386_validate_fix (fixS *fixp)
fixp->fx_subsy = 0;
}
}
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
else
{
/* NB: Commit 292676c1 resolved PLT32 reloc against local symbol
to section. Since PLT32 relocation must be against symbols,
- turn such PLT32 relocation into PC32 relocation. */
+ turn such PLT32 relocation into PC32 relocation. NB: This is
+ only possible for PC-relative relocations, since non-PC-relative
+ relocations need PLT entries. */
if (fixp->fx_addsy
+ && fixp->fx_pcrel
&& (fixp->fx_r_type == BFD_RELOC_386_PLT32
|| fixp->fx_r_type == BFD_RELOC_X86_64_PLT32)
&& symbol_section_p (fixp->fx_addsy))
@@ -18212,7 +17927,7 @@ tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
switch (fixp->fx_r_type)
{
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
symbolS *sym;
case BFD_RELOC_SIZE32:
@@ -18229,7 +17944,7 @@ tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
sym = fixp->fx_subsy;
else
sym = NULL;
- if (IS_ELF && sym && S_IS_DEFINED (sym) && !S_IS_EXTERNAL (sym))
+ if (sym && S_IS_DEFINED (sym) && !S_IS_EXTERNAL (sym))
{
/* Resolve size relocation against local symbol to size of
the symbol plus addend. */
@@ -18507,7 +18222,7 @@ tc_x86_frame_initial_instructions (void)
int
x86_dwarf2_addr_size (void)
{
-#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
+#ifdef OBJ_ELF
if (x86_elf_abi == X86_64_X32_ABI)
return 4;
#endif
@@ -18527,7 +18242,7 @@ tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
}
#endif
-#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
+#ifdef OBJ_ELF
int
i386_elf_section_type (const char *str, size_t len)
{
@@ -18636,4 +18351,4 @@ handle_large_common (int small ATTRIBUTE_UNUSED)
bss_section = saved_bss_section;
}
}
-#endif /* OBJ_ELF || OBJ_MAYBE_ELF */
+#endif /* OBJ_ELF */