author     Jim Wilson <wilson@tuliptree.org>   2000-04-21 20:22:24 +0000
committer  Jim Wilson <wilson@tuliptree.org>   2000-04-21 20:22:24 +0000
commit     800eeca487f145ccc5481a03bfff2b871a2fd361 (patch)
tree       cedc52859f0a66d17a78d5b7e772f8a6c3d3b693 /bfd
parent     c9637625e4ff16d9a9f3a203c5609cd5ada1eafa (diff)
IA-64 ELF support.
Diffstat (limited to 'bfd')
-rw-r--r--  bfd/ChangeLog        |   28
-rw-r--r--  bfd/Makefile.am      |    9
-rw-r--r--  bfd/Makefile.in      |    9
-rw-r--r--  bfd/archures.c       |    3
-rw-r--r--  bfd/bfd-in2.h        |   66
-rw-r--r--  bfd/config.bfd       |    4
-rwxr-xr-x  bfd/configure        |    7
-rw-r--r--  bfd/configure.host   |    2
-rw-r--r--  bfd/configure.in     |    7
-rw-r--r--  bfd/cpu-ia64-opc.c   |  586
-rw-r--r--  bfd/cpu-ia64.c       |   42
-rw-r--r--  bfd/elf.c            |    3
-rw-r--r--  bfd/elf64-ia64.c     | 3696
-rw-r--r--  bfd/libbfd.h         |   63
-rw-r--r--  bfd/reloc.c          |  128
-rw-r--r--  bfd/targets.c        |    4
16 files changed, 4657 insertions, 0 deletions
diff --git a/bfd/ChangeLog b/bfd/ChangeLog
index 458fbfa..c300410 100644
--- a/bfd/ChangeLog
+++ b/bfd/ChangeLog
@@ -1,3 +1,31 @@
+Fri Apr 21 13:20:53 2000 Richard Henderson <rth@cygnus.com>
+ David Mosberger <davidm@hpl.hp.com>
+ Timothy Wall <twall@cygnus.com>
+ Jim Wilson <wilson@cygnus.com>
+
+ * Makefile.am (ALL_MACHINES): Add cpu-ia64.lo.
+ (ALL_MACHINES_CFILES): Add cpu-ia64.c.
+ (BFD64_BACKENDS): Add elf64-ia64.lo.
+ (BFD64_BACKENDS_CFILES): Add elf64-ia64.c.
+ (cpu-ia64.lo, elf64-ia64.lo): New rules.
+ * Makefile.in: Rebuild.
+ * archures.c (enum bfd_architecture): Add bfd_arch_ia64.
+ (bfd_ia64_arch): Declare.
+ (bfd_archures_list): Add bfd_ia64_arch.
+ * bfd-in2.h: Rebuild.
+ * config.bfd (ia64*-*-linux-gnu*, ia64*-*-elf*): New targets.
+ * configure: Rebuild.
+ * configure.host (ia64-*-linux*): New host.
+ * configure.in (bfd_elf64_ia64_little_vec, bfd_elf64_ia64_big_vec,
+ bfd_efi_app_ia32_vec, bfd_efi_app_ia64_vec): New vectors.
+ * elf.c (prep_headers): Add bfd_arch_ia64.
+ * libbfd.h: Rebuild.
+ * reloc.c: Add IA-64 relocations.
+ * targets.c (bfd_elf64_ia64_little_vec, bfd_elf64_ia64_big_vec):
+ Declare.
+ (bfd_target_vect): Add bfd_elf64_ia64_little_vec.
+ * cpu-ia64-opc.c, cpu-ia64.c, elf64-ia64.c: New files.
+
2000-04-21 Richard Henderson <rth@cygnus.com>

* elf32-d30v.c (bfd_elf_d30v_reloc): Don't modify section
diff --git a/bfd/Makefile.am b/bfd/Makefile.am
index 142fa6d..fd7fe42 100644
--- a/bfd/Makefile.am
+++ b/bfd/Makefile.am
@@ -51,6 +51,7 @@ ALL_MACHINES = \
cpu-h8300.lo \
cpu-h8500.lo \
cpu-hppa.lo \
+ cpu-ia64.lo \
cpu-i370.lo \
cpu-i386.lo \
cpu-i860.lo \
@@ -89,6 +90,7 @@ ALL_MACHINES_CFILES = \
cpu-h8300.c \
cpu-h8500.c \
cpu-hppa.c \
+ cpu-ia64.c \
cpu-i370.c \
cpu-i386.c \
cpu-i860.c \
@@ -388,6 +390,7 @@ BFD64_BACKENDS = \
coff-ia64.lo \
demo64.lo \
elf64-alpha.lo \
+ elf64-ia64.lo \
elf64-gen.lo \
elf64-mips.lo \
elf64-sparc.lo \
@@ -401,6 +404,7 @@ BFD64_BACKENDS_CFILES = \
coff-ia64.c \
demo64.c \
elf64-alpha.c \
+ elf64-ia64.c \
elf64-gen.c \
elf64-mips.c \
elf64-sparc.c \
@@ -654,6 +658,11 @@ config.status: $(srcdir)/configure $(srcdir)/config.bfd $(srcdir)/configure.host
$(SHELL) ./config.status --recheck
+cpu-ia64.lo: cpu-ia64.c cpu-ia64-opc.c $(srcdir)/../opcodes/ia64-opc.h
+elf64-ia64.lo: elf64-ia64.c elf-bfd.h $(INCDIR)/opcode/ia64.h \
+ $(INCDIR)/elf/ia64.h $(INCDIR)/elf/common.h $(INCDIR)/elf/internal.h \
+ $(INCDIR)/elf/external.h $(INCDIR)/bfdlink.h $(INCDIR)/elf/reloc-macros.h \
+ elf64-target.h
elfarm-oabi.lo: elfarm-oabi.c elf32-arm.h elf-bfd.h $(INCDIR)/elf/common.h \
$(INCDIR)/elf/internal.h $(INCDIR)/elf/external.h $(INCDIR)/bfdlink.h \
$(INCDIR)/elf/arm.h $(INCDIR)/elf/reloc-macros.h elf32-target.h
diff --git a/bfd/Makefile.in b/bfd/Makefile.in
index 1421bb2..360b32d 100644
--- a/bfd/Makefile.in
+++ b/bfd/Makefile.in
@@ -166,6 +166,7 @@ ALL_MACHINES = \
cpu-h8300.lo \
cpu-h8500.lo \
cpu-hppa.lo \
+ cpu-ia64.lo \
cpu-i370.lo \
cpu-i386.lo \
cpu-i860.lo \
@@ -205,6 +206,7 @@ ALL_MACHINES_CFILES = \
cpu-h8300.c \
cpu-h8500.c \
cpu-hppa.c \
+ cpu-ia64.c \
cpu-i370.c \
cpu-i386.c \
cpu-i860.c \
@@ -507,6 +509,7 @@ BFD64_BACKENDS = \
coff-ia64.lo \
demo64.lo \
elf64-alpha.lo \
+ elf64-ia64.lo \
elf64-gen.lo \
elf64-mips.lo \
elf64-sparc.lo \
@@ -521,6 +524,7 @@ BFD64_BACKENDS_CFILES = \
coff-ia64.c \
demo64.c \
elf64-alpha.c \
+ elf64-ia64.c \
elf64-gen.c \
elf64-mips.c \
elf64-sparc.c \
@@ -1182,6 +1186,11 @@ stmp-lcoff-h: $(LIBCOFF_H_FILES)
config.status: $(srcdir)/configure $(srcdir)/config.bfd $(srcdir)/configure.host
$(SHELL) ./config.status --recheck
+cpu-ia64.lo: cpu-ia64.c cpu-ia64-opc.c $(srcdir)/../opcodes/ia64-opc.h
+elf64-ia64.lo: elf64-ia64.c elf-bfd.h $(INCDIR)/opcode/ia64.h \
+ $(INCDIR)/elf/ia64.h $(INCDIR)/elf/common.h $(INCDIR)/elf/internal.h \
+ $(INCDIR)/elf/external.h $(INCDIR)/bfdlink.h $(INCDIR)/elf/reloc-macros.h \
+ elf64-target.h
elfarm-oabi.lo: elfarm-oabi.c elf32-arm.h elf-bfd.h $(INCDIR)/elf/common.h \
$(INCDIR)/elf/internal.h $(INCDIR)/elf/external.h $(INCDIR)/bfdlink.h \
$(INCDIR)/elf/arm.h $(INCDIR)/elf/reloc-macros.h elf32-target.h
diff --git a/bfd/archures.c b/bfd/archures.c
index d24ee82..536f2b1 100644
--- a/bfd/archures.c
+++ b/bfd/archures.c
@@ -199,6 +199,7 @@ DESCRIPTION
. bfd_arch_fr30,
.#define bfd_mach_fr30 0x46523330
. bfd_arch_mcore,
+. bfd_arch_ia64, {* HP/Intel ia64 *}
. bfd_arch_pj,
. bfd_arch_avr, {* Atmel AVR microcontrollers *}
.#define bfd_mach_avr1 1
@@ -279,6 +280,7 @@ extern const bfd_arch_info_type bfd_v850_arch;
extern const bfd_arch_info_type bfd_fr30_arch;
extern const bfd_arch_info_type bfd_mcore_arch;
extern const bfd_arch_info_type bfd_avr_arch;
+extern const bfd_arch_info_type bfd_ia64_arch;
static const bfd_arch_info_type * const bfd_archures_list[] =
{
@@ -320,6 +322,7 @@ static const bfd_arch_info_type * const bfd_archures_list[] =
&bfd_fr30_arch,
&bfd_mcore_arch,
&bfd_avr_arch,
+ &bfd_ia64_arch,
#endif
0
};
diff --git a/bfd/bfd-in2.h b/bfd/bfd-in2.h
index e20c650..f00e8a1 100644
--- a/bfd/bfd-in2.h
+++ b/bfd/bfd-in2.h
@@ -1432,6 +1432,7 @@ enum bfd_architecture
bfd_arch_fr30,
#define bfd_mach_fr30 0x46523330
bfd_arch_mcore,
+ bfd_arch_ia64, /* HP/Intel ia64 */
bfd_arch_pj,
bfd_arch_avr, /* Atmel AVR microcontrollers */
#define bfd_mach_avr1 1
@@ -2448,6 +2449,71 @@ is stored in the reloc's addend. For Rel hosts, we are forced to put
this offset in the reloc's section offset. */
BFD_RELOC_VTABLE_INHERIT,
BFD_RELOC_VTABLE_ENTRY,
+
+/* Intel IA64 Relocations. */
+ BFD_RELOC_IA64_IMM14,
+ BFD_RELOC_IA64_IMM22,
+ BFD_RELOC_IA64_IMM64,
+ BFD_RELOC_IA64_DIR32MSB,
+ BFD_RELOC_IA64_DIR32LSB,
+ BFD_RELOC_IA64_DIR64MSB,
+ BFD_RELOC_IA64_DIR64LSB,
+ BFD_RELOC_IA64_GPREL22,
+ BFD_RELOC_IA64_GPREL64I,
+ BFD_RELOC_IA64_GPREL32MSB,
+ BFD_RELOC_IA64_GPREL32LSB,
+ BFD_RELOC_IA64_GPREL64MSB,
+ BFD_RELOC_IA64_GPREL64LSB,
+ BFD_RELOC_IA64_LTOFF22,
+ BFD_RELOC_IA64_LTOFF64I,
+ BFD_RELOC_IA64_PLTOFF22,
+ BFD_RELOC_IA64_PLTOFF64I,
+ BFD_RELOC_IA64_PLTOFF64MSB,
+ BFD_RELOC_IA64_PLTOFF64LSB,
+ BFD_RELOC_IA64_FPTR64I,
+ BFD_RELOC_IA64_FPTR32MSB,
+ BFD_RELOC_IA64_FPTR32LSB,
+ BFD_RELOC_IA64_FPTR64MSB,
+ BFD_RELOC_IA64_FPTR64LSB,
+ BFD_RELOC_IA64_PCREL21B,
+ BFD_RELOC_IA64_PCREL21M,
+ BFD_RELOC_IA64_PCREL21F,
+ BFD_RELOC_IA64_PCREL32MSB,
+ BFD_RELOC_IA64_PCREL32LSB,
+ BFD_RELOC_IA64_PCREL64MSB,
+ BFD_RELOC_IA64_PCREL64LSB,
+ BFD_RELOC_IA64_LTOFF_FPTR22,
+ BFD_RELOC_IA64_LTOFF_FPTR64I,
+ BFD_RELOC_IA64_LTOFF_FPTR64MSB,
+ BFD_RELOC_IA64_LTOFF_FPTR64LSB,
+ BFD_RELOC_IA64_SEGBASE,
+ BFD_RELOC_IA64_SEGREL32MSB,
+ BFD_RELOC_IA64_SEGREL32LSB,
+ BFD_RELOC_IA64_SEGREL64MSB,
+ BFD_RELOC_IA64_SEGREL64LSB,
+ BFD_RELOC_IA64_SECREL32MSB,
+ BFD_RELOC_IA64_SECREL32LSB,
+ BFD_RELOC_IA64_SECREL64MSB,
+ BFD_RELOC_IA64_SECREL64LSB,
+ BFD_RELOC_IA64_REL32MSB,
+ BFD_RELOC_IA64_REL32LSB,
+ BFD_RELOC_IA64_REL64MSB,
+ BFD_RELOC_IA64_REL64LSB,
+ BFD_RELOC_IA64_LTV32MSB,
+ BFD_RELOC_IA64_LTV32LSB,
+ BFD_RELOC_IA64_LTV64MSB,
+ BFD_RELOC_IA64_LTV64LSB,
+ BFD_RELOC_IA64_IPLTMSB,
+ BFD_RELOC_IA64_IPLTLSB,
+ BFD_RELOC_IA64_EPLTMSB,
+ BFD_RELOC_IA64_EPLTLSB,
+ BFD_RELOC_IA64_COPY,
+ BFD_RELOC_IA64_TPREL22,
+ BFD_RELOC_IA64_TPREL64MSB,
+ BFD_RELOC_IA64_TPREL64LSB,
+ BFD_RELOC_IA64_LTOFF_TP22,
+ BFD_RELOC_IA64_LTOFF22X,
+ BFD_RELOC_IA64_LDXMOV,
BFD_RELOC_UNUSED };
typedef enum bfd_reloc_code_real bfd_reloc_code_real_type;
reloc_howto_type *
diff --git a/bfd/config.bfd b/bfd/config.bfd
index eaaac05..9744cbd 100644
--- a/bfd/config.bfd
+++ b/bfd/config.bfd
@@ -80,6 +80,10 @@ case "${targ}" in
alpha*-*-*)
targ_defvec=ecoffalpha_little_vec
;;
+ ia64*-*-linux-gnu* | ia64*-*-elf*)
+ targ_defvec=bfd_elf64_ia64_little_vec
+ targ_selvecs="bfd_elf64_ia64_big_vec bfd_efi_app_ia64_vec"
+ ;;
#endif /* BFD64 */
arc-*-elf*)
diff --git a/bfd/configure b/bfd/configure
index feaae04..63a65ac 100755
--- a/bfd/configure
+++ b/bfd/configure
@@ -5112,6 +5112,13 @@ do
target64=true ;;
bfd_elf64_alpha_vec) tb="$tb elf64-alpha.lo elf64.lo $elf"
target64=true ;;
+ bfd_elf64_ia64_little_vec) tb="$tb elf64-ia64.lo elf64.lo $elf"
+ target64=true ;;
+ bfd_elf64_ia64_big_vec) tb="$tb elf64-ia64.lo elf64.lo $elf"
+ target64=true ;;
+ bfd_efi_app_ia32_vec) tb="$tb efi-app-ia32.lo cofflink.lo" ;;
+ bfd_efi_app_ia64_vec) tb="$tb efi-app-ia64.lo cofflink.lo"
+ target64=true ;;
bfd_elf32_avr_vec) tb="$tb elf32-avr.lo elf32.lo $elf" ;;
bfd_elf32_littlearc_vec) tb="$tb elf32-arc.lo elf32.lo $elf" ;;
bfd_elf32_littlearm_vec) tb="$tb elfarm-nabi.lo elf32.lo $elf" ;;
diff --git a/bfd/configure.host b/bfd/configure.host
index e24cd60..bd5391c 100644
--- a/bfd/configure.host
+++ b/bfd/configure.host
@@ -27,6 +27,8 @@ hppa*-*-mpeix*) HDEFINES=-DHOST_HPPAMPEIX ;;
hppa*-*-bsd*) HDEFINES=-DHOST_HPPABSD ;;
hppa*-*-osf*) HDEFINES=-DHOST_HPPAOSF ;;
+ia64-*-linux*) host64=true; HOST_64BIT_TYPE=long ;;
+
i[3456]86-sequent-bsd*) HDEFINES=-Dshared=genshared ;;
i[3456]86-sequent-sysv4*) ;;
i[3456]86-sequent-sysv*) HDEFINES=-Dshared=genshared ;;
diff --git a/bfd/configure.in b/bfd/configure.in
index e62fcee..1a9b94a 100644
--- a/bfd/configure.in
+++ b/bfd/configure.in
@@ -469,6 +469,13 @@ do
target64=true ;;
bfd_elf64_alpha_vec) tb="$tb elf64-alpha.lo elf64.lo $elf"
target64=true ;;
+ bfd_elf64_ia64_little_vec) tb="$tb elf64-ia64.lo elf64.lo $elf"
+ target64=true ;;
+ bfd_elf64_ia64_big_vec) tb="$tb elf64-ia64.lo elf64.lo $elf"
+ target64=true ;;
+ bfd_efi_app_ia32_vec) tb="$tb efi-app-ia32.lo cofflink.lo" ;;
+ bfd_efi_app_ia64_vec) tb="$tb efi-app-ia64.lo cofflink.lo"
+ target64=true ;;
bfd_elf32_avr_vec) tb="$tb elf32-avr.lo elf32.lo $elf" ;;
bfd_elf32_littlearc_vec) tb="$tb elf32-arc.lo elf32.lo $elf" ;;
bfd_elf32_littlearm_vec) tb="$tb elfarm-nabi.lo elf32.lo $elf" ;;
diff --git a/bfd/cpu-ia64-opc.c b/bfd/cpu-ia64-opc.c
new file mode 100644
index 0000000..130dbe7
--- /dev/null
+++ b/bfd/cpu-ia64-opc.c
@@ -0,0 +1,586 @@
+/* Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
+
+This file is part of BFD, the Binary File Descriptor library.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+
+/* Logically, this code should be part of libopcode but since some of
+ the operand insertion/extraction functions help bfd to implement
+ relocations, this code is included as part of elf64-ia64.c. This
+ avoids circular dependencies between libopcode and libbfd and also
+ obviates the need for applications to link in libopcode when all
+ they really want is libbfd.
+
+ --davidm Mon Apr 13 22:14:02 1998 */
+
+#include "../opcodes/ia64-opc.h"
+
+#define NELEMS(a) ((int) (sizeof (a) / sizeof ((a)[0])))
+
+static const char*
+ins_rsvd (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+{
+ return "internal error---this shouldn't happen";
+}
+
+static const char*
+ext_rsvd (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+{
+ return "internal error---this shouldn't happen";
+}
+
+static const char*
+ins_const (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+{
+ return 0;
+}
+
+static const char*
+ext_const (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+{
+ return 0;
+}
+
+static const char*
+ins_reg (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+{
+ if (value >= 1u << self->field[0].bits)
+ return "register number out of range";
+
+ *code |= value << self->field[0].shift;
+ return 0;
+}
+
+static const char*
+ext_reg (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+{
+ *valuep = ((code >> self->field[0].shift)
+ & ((1u << self->field[0].bits) - 1));
+ return 0;
+}
+
+static const char*
+ins_immu (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+{
+ ia64_insn new = 0;
+ int i;
+
+ for (i = 0; i < NELEMS (self->field) && self->field[i].bits; ++i)
+ {
+ new |= ((value & ((((ia64_insn) 1) << self->field[i].bits) - 1))
+ << self->field[i].shift);
+ value >>= self->field[i].bits;
+ }
+ if (value)
+ return "integer operand out of range";
+
+ *code |= new;
+ return 0;
+}
+
+static const char*
+ext_immu (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+{
+ BFD_HOST_U_64_BIT value = 0;
+ int i, bits = 0, total = 0;
+
+ for (i = 0; i < NELEMS (self->field) && self->field[i].bits; ++i)
+ {
+ bits = self->field[i].bits;
+ value |= ((code >> self->field[i].shift)
+ & ((((BFD_HOST_U_64_BIT) 1) << bits) - 1)) << total;
+ total += bits;
+ }
+ *valuep = value;
+ return 0;
+}
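
ins_immu and ext_immu scatter an immediate across one or more instruction
fields, low-order bits first, and reassemble it in the same order. Below is a
minimal stand-alone sketch of that round trip, using the IMMU9 layout from the
operand table later in this file (low 2 bits at bit 33, next 7 bits at bit 20);
the type and helper names are invented for the demo.

    #include <assert.h>

    typedef unsigned long long insn_t;
    struct bitfield { int bits, shift; };

    /* IMMU9: low 2 bits at shift 33, remaining 7 bits at shift 20.  */
    static const struct bitfield immu9[2] = { { 2, 33 }, { 7, 20 } };

    static insn_t
    insert_immu (insn_t value, const struct bitfield *f, int n)
    {
      insn_t code = 0;
      int i;

      for (i = 0; i < n; ++i)
        {
          code |= (value & (((insn_t) 1 << f[i].bits) - 1)) << f[i].shift;
          value >>= f[i].bits;
        }
      assert (value == 0);      /* otherwise the operand is out of range */
      return code;
    }

    static insn_t
    extract_immu (insn_t code, const struct bitfield *f, int n)
    {
      insn_t value = 0;
      int i, total = 0;

      for (i = 0; i < n; ++i)
        {
          value |= ((code >> f[i].shift)
                    & (((insn_t) 1 << f[i].bits) - 1)) << total;
          total += f[i].bits;
        }
      return value;
    }

    int
    main (void)
    {
      insn_t code = insert_immu (0x1a5, immu9, 2);  /* a 9-bit immediate */
      assert (extract_immu (code, immu9, 2) == 0x1a5);
      return 0;
    }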
+
+static const char*
+ins_immus8 (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+{
+ if (value & 0x7)
+ return "value not an integer multiple of 8";
+ return ins_immu (self, value >> 3, code);
+}
+
+static const char*
+ext_immus8 (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+{
+ const char *result;
+
+ result = ext_immu (self, code, valuep);
+ if (result)
+ return result;
+
+ *valuep = *valuep << 3;
+ return 0;
+}
+
+static const char*
+ins_imms_scaled (const struct ia64_operand *self, ia64_insn value,
+ ia64_insn *code, int scale)
+{
+ BFD_HOST_64_BIT svalue = value, sign_bit;
+ ia64_insn new = 0;
+ int i;
+
+ svalue >>= scale;
+
+ for (i = 0; i < NELEMS (self->field) && self->field[i].bits; ++i)
+ {
+ new |= ((svalue & ((((ia64_insn) 1) << self->field[i].bits) - 1))
+ << self->field[i].shift);
+ sign_bit = (svalue >> (self->field[i].bits - 1)) & 1;
+ svalue >>= self->field[i].bits;
+ }
+ if ((!sign_bit && svalue != 0) || (sign_bit && svalue != -1))
+ return "integer operand out of range";
+
+ *code |= new;
+ return 0;
+}
+
+static const char*
+ext_imms_scaled (const struct ia64_operand *self, ia64_insn code,
+ ia64_insn *valuep, int scale)
+{
+ int i, bits = 0, total = 0, shift;
+ BFD_HOST_64_BIT val = 0;
+
+ for (i = 0; i < NELEMS (self->field) && self->field[i].bits; ++i)
+ {
+ bits = self->field[i].bits;
+ val |= ((code >> self->field[i].shift)
+ & ((((BFD_HOST_U_64_BIT) 1) << bits) - 1)) << total;
+ total += bits;
+ }
+ /* sign extend: */
+ shift = 8*sizeof (val) - total;
+ val = (val << shift) >> shift;
+
+ *valuep = (val << scale);
+ return 0;
+}
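
The two-shift idiom above ("sign extend:") moves the reassembled field's sign
bit up to bit 63 and arithmetic-shifts it back down, replicating it through the
high-order bits. A tiny self-contained illustration for a 9-bit field; like the
patch itself, it leans on the host compiler providing an arithmetic right shift
for signed types.

    #include <assert.h>

    int
    main (void)
    {
      long long val = 0x1ff;             /* 9-bit two's-complement -1 */
      int shift = 8 * sizeof (val) - 9;  /* 64 - 9 = 55 */

      val = (val << shift) >> shift;     /* replicate bit 8 upward */
      assert (val == -1);
      return 0;
    }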
+
+static const char*
+ins_imms (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+{
+ return ins_imms_scaled (self, value, code, 0);
+}
+
+static const char*
+ins_immsu4 (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+{
+ if (value == (BFD_HOST_U_64_BIT) 0x100000000)
+ value = 0;
+ else
+ value = (((BFD_HOST_64_BIT)value << 32) >> 32);
+
+ return ins_imms_scaled (self, value, code, 0);
+}
+
+static const char*
+ext_imms (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+{
+ return ext_imms_scaled (self, code, valuep, 0);
+}
+
+static const char*
+ins_immsm1 (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+{
+ --value;
+ return ins_imms_scaled (self, value, code, 0);
+}
+
+static const char*
+ins_immsm1u4 (const struct ia64_operand *self, ia64_insn value,
+ ia64_insn *code)
+{
+ if (value == (BFD_HOST_U_64_BIT) 0x100000000)
+ value = 0;
+ else
+ value = (((BFD_HOST_64_BIT)value << 32) >> 32);
+
+ --value;
+ return ins_imms_scaled (self, value, code, 0);
+}
+
+static const char*
+ext_immsm1 (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+{
+ const char *res = ext_imms_scaled (self, code, valuep, 0);
+
+ ++*valuep;
+ return res;
+}
+
+static const char*
+ins_imms1 (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+{
+ return ins_imms_scaled (self, value, code, 1);
+}
+
+static const char*
+ext_imms1 (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+{
+ return ext_imms_scaled (self, code, valuep, 1);
+}
+
+static const char*
+ins_imms4 (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+{
+ return ins_imms_scaled (self, value, code, 4);
+}
+
+static const char*
+ext_imms4 (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+{
+ return ext_imms_scaled (self, code, valuep, 4);
+}
+
+static const char*
+ins_imms16 (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+{
+ return ins_imms_scaled (self, value, code, 16);
+}
+
+static const char*
+ext_imms16 (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+{
+ return ext_imms_scaled (self, code, valuep, 16);
+}
+
+static const char*
+ins_cimmu (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+{
+ ia64_insn mask = (((ia64_insn) 1) << self->field[0].bits) - 1;
+ return ins_immu (self, value ^ mask, code);
+}
+
+static const char*
+ext_cimmu (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+{
+ const char *result;
+ ia64_insn mask;
+
+ result = ext_immu (self, code, valuep);
+ if (!result)
+ {
+ mask = (((ia64_insn) 1) << self->field[0].bits) - 1;
+ *valuep ^= mask;
+ }
+ return result;
+}
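
The "c"-prefixed immediates are stored complemented: ins_cimmu XORs the value
with an all-ones mask before insertion and ext_cimmu undoes it after
extraction, so an all-zero field encodes the operand's maximum. A
self-contained illustration for a 6-bit operand such as CPOS6a (ia64_insn
stands in for the real typedef):

    #include <assert.h>

    typedef unsigned long long ia64_insn;

    int
    main (void)
    {
      ia64_insn mask = (((ia64_insn) 1) << 6) - 1;  /* 63 */
      ia64_insn stored = 40 ^ mask;                 /* bit pos 40 stored as 23 */

      assert ((stored ^ mask) == 40);               /* decoding restores it */
      return 0;
    }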
+
+static const char*
+ins_cnt (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+{
+ --value;
+ if (value >= ((BFD_HOST_U_64_BIT) 1) << self->field[0].bits)
+ return "count out of range";
+
+ *code |= value << self->field[0].shift;
+ return 0;
+}
+
+static const char*
+ext_cnt (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+{
+ *valuep = ((code >> self->field[0].shift)
+ & ((((BFD_HOST_U_64_BIT) 1) << self->field[0].bits) - 1)) + 1;
+ return 0;
+}
+
+static const char*
+ins_cnt2b (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+{
+ --value;
+
+ if (value > 2)
+ return "count must be in range 1..3";
+
+ *code |= value << self->field[0].shift;
+ return 0;
+}
+
+static const char*
+ext_cnt2b (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+{
+ *valuep = ((code >> self->field[0].shift) & 0x3) + 1;
+ return 0;
+}
+
+static const char*
+ins_cnt2c (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+{
+ switch (value)
+ {
+ case 0: value = 0; break;
+ case 7: value = 1; break;
+ case 15: value = 2; break;
+ case 16: value = 3; break;
+ default: return "count must be 0, 7, 15, or 16";
+ }
+ *code |= value << self->field[0].shift;
+ return 0;
+}
+
+static const char*
+ext_cnt2c (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+{
+ ia64_insn value;
+
+ value = (code >> self->field[0].shift) & 0x3;
+ switch (value)
+ {
+ case 0: value = 0; break;
+ case 1: value = 7; break;
+ case 2: value = 15; break;
+ case 3: value = 16; break;
+ }
+ *valuep = value;
+ return 0;
+}
+
+static const char*
+ins_inc3 (const struct ia64_operand *self, ia64_insn value, ia64_insn *code)
+{
+ BFD_HOST_64_BIT val = value;
+ BFD_HOST_U_64_BIT sign = 0;
+
+ if (val < 0)
+ {
+ sign = 0x4;
+ value = -value;
+ }
+ switch (value)
+ {
+ case 1: value = 3; break;
+ case 4: value = 2; break;
+ case 8: value = 1; break;
+ case 16: value = 0; break;
+ default: return "count must be +/- 1, 4, 8, or 16";
+ }
+ *code |= (sign | value) << self->field[0].shift;
+ return 0;
+}
+
+static const char*
+ext_inc3 (const struct ia64_operand *self, ia64_insn code, ia64_insn *valuep)
+{
+ BFD_HOST_64_BIT val;
+ int negate;
+
+ val = (code >> self->field[0].shift) & 0x7;
+ negate = val & 0x4;
+ switch (val & 0x3)
+ {
+ case 0: val = 16; break;
+ case 1: val = 8; break;
+ case 2: val = 4; break;
+ case 3: val = 1; break;
+ }
+ if (negate)
+ val = -val;
+
+ *valuep = val;
+ return 0;
+}
+
+#define CST IA64_OPND_CLASS_CST
+#define REG IA64_OPND_CLASS_REG
+#define IND IA64_OPND_CLASS_IND
+#define ABS IA64_OPND_CLASS_ABS
+#define REL IA64_OPND_CLASS_REL
+
+#define SDEC IA64_OPND_FLAG_DECIMAL_SIGNED
+#define UDEC IA64_OPND_FLAG_DECIMAL_UNSIGNED
+
+const struct ia64_operand elf64_ia64_operands[IA64_OPND_COUNT] =
+ {
+ /* constants: */
+ { CST, ins_const, ext_const, "NIL", {{ 0, }}, 0, "<none>" },
+ { CST, ins_const, ext_const, "ar.ccv", {{ 0, }}, 0, "ar.ccv" },
+ { CST, ins_const, ext_const, "ar.pfs", {{ 0, }}, 0, "ar.pfs" },
+ { CST, ins_const, ext_const, "1", {{ 0, }}, 0, "1" },
+ { CST, ins_const, ext_const, "8", {{ 0, }}, 0, "1" },
+ { CST, ins_const, ext_const, "16", {{ 0, }}, 0, "16" },
+ { CST, ins_const, ext_const, "r0", {{ 0, }}, 0, "r0" },
+ { CST, ins_const, ext_const, "ip", {{ 0, }}, 0, "ip" },
+ { CST, ins_const, ext_const, "pr", {{ 0, }}, 0, "pr" },
+ { CST, ins_const, ext_const, "pr.rot", {{ 0, }}, 0, "pr.rot" },
+ { CST, ins_const, ext_const, "psr", {{ 0, }}, 0, "psr" },
+ { CST, ins_const, ext_const, "psr.l", {{ 0, }}, 0, "psr.l" },
+ { CST, ins_const, ext_const, "psr.um", {{ 0, }}, 0, "psr.um" },
+
+ /* register operands: */
+ { REG, ins_reg, ext_reg, "ar", {{ 7, 20}}, 0, /* AR3 */
+ "an application register" },
+ { REG, ins_reg, ext_reg, "b", {{ 3, 6}}, 0, /* B1 */
+ "a branch register" },
+ { REG, ins_reg, ext_reg, "b", {{ 3, 13}}, 0, /* B2 */
+ "a branch register"},
+ { REG, ins_reg, ext_reg, "cr", {{ 7, 20}}, 0, /* CR */
+ "a control register"},
+ { REG, ins_reg, ext_reg, "f", {{ 7, 6}}, 0, /* F1 */
+ "a floating-point register" },
+ { REG, ins_reg, ext_reg, "f", {{ 7, 13}}, 0, /* F2 */
+ "a floating-point register" },
+ { REG, ins_reg, ext_reg, "f", {{ 7, 20}}, 0, /* F3 */
+ "a floating-point register" },
+ { REG, ins_reg, ext_reg, "f", {{ 7, 27}}, 0, /* F4 */
+ "a floating-point register" },
+ { REG, ins_reg, ext_reg, "p", {{ 6, 6}}, 0, /* P1 */
+ "a predicate register" },
+ { REG, ins_reg, ext_reg, "p", {{ 6, 27}}, 0, /* P2 */
+ "a predicate register" },
+ { REG, ins_reg, ext_reg, "r", {{ 7, 6}}, 0, /* R1 */
+ "a general register" },
+ { REG, ins_reg, ext_reg, "r", {{ 7, 13}}, 0, /* R2 */
+ "a general register" },
+ { REG, ins_reg, ext_reg, "r", {{ 7, 20}}, 0, /* R3 */
+ "a general register" },
+ { REG, ins_reg, ext_reg, "r", {{ 2, 20}}, 0, /* R3_2 */
+ "a general register r0-r3" },
+
+ /* indirect operands: */
+ { IND, ins_reg, ext_reg, "cpuid", {{7, 20}}, 0, /* CPUID_R3 */
+ "a cpuid register" },
+ { IND, ins_reg, ext_reg, "dbr", {{7, 20}}, 0, /* DBR_R3 */
+ "a dbr register" },
+ { IND, ins_reg, ext_reg, "dtr", {{7, 20}}, 0, /* DTR_R3 */
+ "a dtr register" },
+ { IND, ins_reg, ext_reg, "itr", {{7, 20}}, 0, /* ITR_R3 */
+ "an itr register" },
+ { IND, ins_reg, ext_reg, "ibr", {{7, 20}}, 0, /* IBR_R3 */
+ "an ibr register" },
+ { IND, ins_reg, ext_reg, "", {{7, 20}}, 0, /* MR3 */
+ "an indirect memory address" },
+ { IND, ins_reg, ext_reg, "msr", {{7, 20}}, 0, /* MSR_R3 */
+ "an msr register" },
+ { IND, ins_reg, ext_reg, "pkr", {{7, 20}}, 0, /* PKR_R3 */
+ "a pkr register" },
+ { IND, ins_reg, ext_reg, "pmc", {{7, 20}}, 0, /* PMC_R3 */
+ "a pmc register" },
+ { IND, ins_reg, ext_reg, "pmd", {{7, 20}}, 0, /* PMD_R3 */
+ "a pmd register" },
+ { IND, ins_reg, ext_reg, "rr", {{7, 20}}, 0, /* RR_R3 */
+ "an rr register" },
+
+ /* immediate operands: */
+ { ABS, ins_cimmu, ext_cimmu, 0, {{ 5, 20 }}, UDEC, /* CCNT5 */
+ "a 5-bit count (0-31)" },
+ { ABS, ins_cnt, ext_cnt, 0, {{ 2, 27 }}, UDEC, /* CNT2a */
+ "a 2-bit count (1-4)" },
+ { ABS, ins_cnt2b, ext_cnt2b, 0, {{ 2, 27 }}, UDEC, /* CNT2b */
+ "a 2-bit count (1-3)" },
+ { ABS, ins_cnt2c, ext_cnt2c, 0, {{ 2, 30 }}, UDEC, /* CNT2c */
+ "a count (0, 7, 15, or 16)" },
+ { ABS, ins_immu, ext_immu, 0, {{ 5, 14}}, UDEC, /* CNT5 */
+ "a 5-bit count (0-31)" },
+ { ABS, ins_immu, ext_immu, 0, {{ 6, 27}}, UDEC, /* CNT6 */
+ "a 6-bit count (0-63)" },
+ { ABS, ins_cimmu, ext_cimmu, 0, {{ 6, 20}}, UDEC, /* CPOS6a */
+ "a 6-bit bit pos (0-63)" },
+ { ABS, ins_cimmu, ext_cimmu, 0, {{ 6, 14}}, UDEC, /* CPOS6b */
+ "a 6-bit bit pos (0-63)" },
+ { ABS, ins_cimmu, ext_cimmu, 0, {{ 6, 31}}, UDEC, /* CPOS6c */
+ "a 6-bit bit pos (0-63)" },
+ { ABS, ins_imms, ext_imms, 0, {{ 1, 36}}, SDEC, /* IMM1 */
+ "a 1-bit integer (-1, 0)" },
+ { ABS, ins_immu, ext_immu, 0, {{ 2, 13}}, UDEC, /* IMMU2 */
+ "a 2-bit unsigned (0-3)" },
+ { ABS, ins_immu, ext_immu, 0, {{ 7, 13}}, 0, /* IMMU7a */
+ "a 7-bit unsigned (0-127)" },
+ { ABS, ins_immu, ext_immu, 0, {{ 7, 20}}, 0, /* IMMU7b */
+ "a 7-bit unsigned (0-127)" },
+ { ABS, ins_immu, ext_immu, 0, {{ 7, 13}}, UDEC, /* SOF */
+ "a frame size (register count)" },
+ { ABS, ins_immu, ext_immu, 0, {{ 7, 20}}, UDEC, /* SOL */
+ "a local register count" },
+ { ABS, ins_immus8,ext_immus8,0, {{ 4, 27}}, UDEC, /* SOR */
+ "a rotating register count (integer multiple of 8)" },
+ { ABS, ins_imms, ext_imms, 0, /* IMM8 */
+ {{ 7, 13}, { 1, 36}}, SDEC,
+ "an 8-bit integer (-128-127)" },
+ { ABS, ins_immsu4, ext_imms, 0, /* IMM8U4 */
+ {{ 7, 13}, { 1, 36}}, SDEC,
+ "an 8-bit signed integer for 32-bit unsigned compare (-128-127)" },
+ { ABS, ins_immsm1, ext_immsm1, 0, /* IMM8M1 */
+ {{ 7, 13}, { 1, 36}}, SDEC,
+ "an 8-bit integer (-127-128)" },
+ { ABS, ins_immsm1u4, ext_immsm1, 0, /* IMM8M1U4 */
+ {{ 7, 13}, { 1, 36}}, SDEC,
+ "an 8-bit integer for 32-bit unsigned compare (-127-(-1),1-128,0x100000000)" },
+ { ABS, ins_immsm1, ext_immsm1, 0, /* IMM8M1U8 */
+ {{ 7, 13}, { 1, 36}}, SDEC,
+ "an 8-bit integer for 64-bit unsigned compare (-127-(-1),1-128,0x10000000000000000)" },
+ { ABS, ins_immu, ext_immu, 0, {{ 2, 33}, { 7, 20}}, 0, /* IMMU9 */
+ "a 9-bit unsigned (0-511)" },
+ { ABS, ins_imms, ext_imms, 0, /* IMM9a */
+ {{ 7, 6}, { 1, 27}, { 1, 36}}, SDEC,
+ "a 9-bit integer (-256-255)" },
+ { ABS, ins_imms, ext_imms, 0, /* IMM9b */
+ {{ 7, 13}, { 1, 27}, { 1, 36}}, SDEC,
+ "a 9-bit integer (-256-255)" },
+ { ABS, ins_imms, ext_imms, 0, /* IMM14 */
+ {{ 7, 13}, { 6, 27}, { 1, 36}}, SDEC,
+ "a 14-bit integer (-8192-8191)" },
+ { ABS, ins_imms1, ext_imms1, 0, /* IMM17 */
+ {{ 7, 6}, { 8, 24}, { 1, 36}}, 0,
+ "a 17-bit integer (-65536-65535)" },
+ { ABS, ins_immu, ext_immu, 0, {{20, 6}, { 1, 36}}, 0, /* IMMU21 */
+ "a 21-bit unsigned" },
+ { ABS, ins_imms, ext_imms, 0, /* IMM22 */
+ {{ 7, 13}, { 9, 27}, { 5, 22}, { 1, 36}}, SDEC,
+ "a 22-bit integer" },
+ { ABS, ins_immu, ext_immu, 0, /* IMMU24 */
+ {{21, 6}, { 2, 31}, { 1, 36}}, 0,
+ "a 24-bit unsigned" },
+ { ABS, ins_imms16,ext_imms16,0, {{27, 6}, { 1, 36}}, 0, /* IMM44 */
+ "a 44-bit unsigned (least 16 bits ignored/zeroes)" },
+ { ABS, ins_rsvd, ext_rsvd, 0, {{0, 0}}, 0, /* IMMU62 */
+ "a 62-bit unsigned" },
+ { ABS, ins_rsvd, ext_rsvd, 0, {{0, 0}}, 0, /* IMMU64 */
+ "a 64-bit unsigned" },
+ { ABS, ins_inc3, ext_inc3, 0, {{ 3, 13}}, SDEC, /* INC3 */
+ "an increment (+/- 1, 4, 8, or 16)" },
+ { ABS, ins_cnt, ext_cnt, 0, {{ 4, 27}}, UDEC, /* LEN4 */
+ "a 4-bit length (1-16)" },
+ { ABS, ins_cnt, ext_cnt, 0, {{ 6, 27}}, UDEC, /* LEN6 */
+ "a 6-bit length (1-64)" },
+ { ABS, ins_immu, ext_immu, 0, {{ 4, 20}}, 0, /* MBTYPE4 */
+ "a mix type (@rev, @mix, @shuf, @alt, or @brcst)" },
+ { ABS, ins_immu, ext_immu, 0, {{ 8, 20}}, 0, /* MBTYPE8 */
+ "an 8-bit mix type" },
+ { ABS, ins_immu, ext_immu, 0, {{ 6, 14}}, UDEC, /* POS6 */
+ "a 6-bit bit pos (0-63)" },
+ { REL, ins_imms4, ext_imms4, 0, {{ 7, 6}, { 2, 33}}, 0, /* TAG13 */
+ "a branch tag" },
+ { REL, ins_imms4, ext_imms4, 0, {{ 9, 24}}, 0, /* TAG13b */
+ "a branch tag" },
+ { REL, ins_imms4, ext_imms4, 0, {{20, 6}, { 1, 36}}, 0, /* TGT25 */
+ "a branch target" },
+ { REL, ins_imms4, ext_imms4, 0, /* TGT25b */
+ {{ 7, 6}, {13, 20}, { 1, 36}}, 0,
+ "a branch target" },
+ { REL, ins_imms4, ext_imms4, 0, {{20, 13}, { 1, 36}}, 0, /* TGT25c */
+ "a branch target" },
+ { REL, ins_rsvd, ext_rsvd, 0, {{0, 0}}, 0, /* TGT64 */
+ "a branch target" },
+ };
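
The table gives the assembler and the relocation code a single interface to
every operand: index the array by operand kind, then call that entry's
insertion or extraction hook. A hypothetical fragment, assuming the
IA64_OPND_R1 index and the insert/extract member names from
opcodes/ia64-opc.h; error handling is elided.

    const struct ia64_operand *opnd = &elf64_ia64_operands[IA64_OPND_R1];
    ia64_insn insn = 0, reg;
    const char *err;

    /* Encode r5 into the R1 field (bits 6..12) of an instruction word.  */
    err = (*opnd->insert) (opnd, 5, &insn);
    if (err == NULL)
      (*opnd->extract) (opnd, insn, &reg);   /* reg == 5 */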
diff --git a/bfd/cpu-ia64.c b/bfd/cpu-ia64.c
new file mode 100644
index 0000000..8069b1a
--- /dev/null
+++ b/bfd/cpu-ia64.c
@@ -0,0 +1,42 @@
+/* BFD support for the ia64 architecture.
+ Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
+
+This file is part of BFD, the Binary File Descriptor library.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+
+#include "bfd.h"
+#include "sysdep.h"
+#include "libbfd.h"
+
+const bfd_arch_info_type bfd_ia64_arch =
+ {
+ 64, /* 64 bits in a word */
+ 64, /* 64 bits in an address */
+ 8, /* 8 bits in a byte */
+ bfd_arch_ia64,
+ 0, /* only 1 machine */
+ "ia64",
+ "ia64",
+ 3, /* log2 of section alignment */
+ true, /* the one and only */
+ bfd_default_compatible,
+ bfd_default_scan,
+ 0,
+ };
+
+#include "cpu-ia64-opc.c"
diff --git a/bfd/elf.c b/bfd/elf.c
index 8830ff0..82b9b45 100644
--- a/bfd/elf.c
+++ b/bfd/elf.c
@@ -3237,6 +3237,9 @@ prep_headers (abfd)
case bfd_arch_i386:
i_ehdrp->e_machine = EM_386;
break;
+ case bfd_arch_ia64:
+ i_ehdrp->e_machine = EM_IA_64;
+ break;
case bfd_arch_m68k:
i_ehdrp->e_machine = EM_68K;
break;
diff --git a/bfd/elf64-ia64.c b/bfd/elf64-ia64.c
new file mode 100644
index 0000000..c24c91d
--- /dev/null
+++ b/bfd/elf64-ia64.c
@@ -0,0 +1,3696 @@
+/* IA-64 support for 64-bit ELF
+ Copyright 1998, 1999 Free Software Foundation, Inc.
+ Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
+
+This file is part of BFD, the Binary File Descriptor library.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+#include "bfd.h"
+#include "sysdep.h"
+#include "libbfd.h"
+#include "elf-bfd.h"
+#include "opcode/ia64.h"
+#include "elf/ia64.h"
+
+/*
+ * THE RULES for all the stuff the linker creates --
+ *
+ * GOT Entries created in response to LTOFF or LTOFF_FPTR
+ * relocations. Dynamic relocs created for dynamic
+ * symbols in an application; REL relocs for locals
+ * in a shared library.
+ *
+ * FPTR The canonical function descriptor. Created for local
+ * symbols in applications. Descriptors for dynamic symbols
+ * and local symbols in shared libraries are created by
+ * ld.so. Thus there are no dynamic relocs against these
+ * objects. The FPTR relocs for such _are_ passed through
+ * to the dynamic relocation tables.
+ *
+ * FULL_PLT Created for a PCREL21B relocation against a dynamic symbol.
+ * Requires the creation of a PLTOFF entry. This does not
+ * require any dynamic relocations.
+ *
+ * PLTOFF Created by PLTOFF relocations. For local symbols, this
+ * is an alternate function descriptor, and in shared libraries
+ * requires two REL relocations. Note that this cannot be
+ * transformed into an FPTR relocation, since it must be in
+ * range of the GP. For dynamic symbols, this is a function
+ * descriptor for a MIN_PLT entry, and requires one IPLT reloc.
+ *
+ * MIN_PLT Created by PLTOFF entries against dynamic symbols. This
+ * does not require dynamic relocations.
+ */
+
+#define USE_RELA /* we want RELA relocs, not REL */
+
+#define NELEMS(a) ((int) (sizeof (a) / sizeof ((a)[0])))
+
+typedef struct bfd_hash_entry *(*new_hash_entry_func)
+ PARAMS ((struct bfd_hash_entry *, struct bfd_hash_table *, const char *));
+
+/* In dynamically (linker-) created sections, we generally need to keep track
+ of the place a symbol or expression got allocated to. This is done via hash
+ tables that store entries of the following type. */
+
+struct elf64_ia64_dyn_sym_info
+{
+ /* The addend for which this entry is relevant. */
+ bfd_vma addend;
+
+ /* Next addend in the list. */
+ struct elf64_ia64_dyn_sym_info *next;
+
+ bfd_vma got_offset;
+ bfd_vma fptr_offset;
+ bfd_vma pltoff_offset;
+ bfd_vma plt_offset;
+ bfd_vma plt2_offset;
+
+ /* The symbol table entry, if any, that this was derived from. */
+ struct elf_link_hash_entry *h;
+
+ /* Used to count non-got, non-plt relocations for delayed sizing
+ of relocation sections. */
+ struct elf64_ia64_dyn_reloc_entry
+ {
+ struct elf64_ia64_dyn_reloc_entry *next;
+ asection *srel;
+ int type;
+ int count;
+ } *reloc_entries;
+
+ /* True when the section contents have been updated. */
+ unsigned got_done : 1;
+ unsigned fptr_done : 1;
+ unsigned pltoff_done : 1;
+
+ /* True for the different kinds of linker data we want created. */
+ unsigned want_got : 1;
+ unsigned want_fptr : 1;
+ unsigned want_ltoff_fptr : 1;
+ unsigned want_plt : 1;
+ unsigned want_plt2 : 1;
+ unsigned want_pltoff : 1;
+};
+
+struct elf64_ia64_local_hash_entry
+{
+ struct bfd_hash_entry root;
+ struct elf64_ia64_dyn_sym_info *info;
+};
+
+struct elf64_ia64_local_hash_table
+{
+ struct bfd_hash_table root;
+ /* No additional fields for now. */
+};
+
+struct elf64_ia64_link_hash_entry
+{
+ struct elf_link_hash_entry root;
+ struct elf64_ia64_dyn_sym_info *info;
+};
+
+struct elf64_ia64_link_hash_table
+{
+ /* The main hash table */
+ struct elf_link_hash_table root;
+
+ asection *got_sec; /* the linkage table section (or NULL) */
+ asection *rel_got_sec; /* dynamic relocation section for same */
+ asection *fptr_sec; /* function descriptor table (or NULL) */
+ asection *plt_sec; /* the primary plt section (or NULL) */
+ asection *pltoff_sec; /* private descriptors for plt (or NULL) */
+ asection *rel_pltoff_sec; /* dynamic relocation section for same */
+
+ bfd_size_type minplt_entries; /* number of minplt entries */
+
+ struct elf64_ia64_local_hash_table loc_hash_table;
+};
+
+#define elf64_ia64_hash_table(p) \
+ ((struct elf64_ia64_link_hash_table *) ((p)->hash))
+
+static bfd_reloc_status_type elf64_ia64_reloc
+ PARAMS ((bfd *abfd, arelent *reloc, asymbol *sym, PTR data,
+ asection *input_section, bfd *output_bfd, char **error_message));
+static reloc_howto_type * lookup_howto
+ PARAMS ((unsigned int rtype));
+static reloc_howto_type *elf64_ia64_reloc_type_lookup
+ PARAMS ((bfd *abfd, bfd_reloc_code_real_type bfd_code));
+static void elf64_ia64_info_to_howto
+ PARAMS ((bfd *abfd, arelent *bfd_reloc, Elf64_Internal_Rela *elf_reloc));
+static boolean elf64_ia64_section_from_shdr
+ PARAMS ((bfd *, Elf64_Internal_Shdr *, char *));
+static boolean elf64_ia64_fake_sections
+ PARAMS ((bfd *abfd, Elf64_Internal_Shdr *hdr, asection *sec));
+static boolean elf64_ia64_add_symbol_hook
+ PARAMS ((bfd *abfd, struct bfd_link_info *info, const Elf_Internal_Sym *sym,
+ const char **namep, flagword *flagsp, asection **secp,
+ bfd_vma *valp));
+static int elf64_ia64_additional_program_headers
+ PARAMS ((bfd *abfd));
+static boolean elf64_ia64_is_local_label_name
+ PARAMS ((bfd *abfd, const char *name));
+static boolean elf64_ia64_dynamic_symbol_p
+ PARAMS ((struct elf_link_hash_entry *h, struct bfd_link_info *info));
+static boolean elf64_ia64_local_hash_table_init
+ PARAMS ((struct elf64_ia64_local_hash_table *ht, bfd *abfd,
+ new_hash_entry_func new));
+static struct bfd_hash_entry *elf64_ia64_new_loc_hash_entry
+ PARAMS ((struct bfd_hash_entry *entry, struct bfd_hash_table *table,
+ const char *string));
+static struct bfd_hash_entry *elf64_ia64_new_elf_hash_entry
+ PARAMS ((struct bfd_hash_entry *entry, struct bfd_hash_table *table,
+ const char *string));
+static struct bfd_link_hash_table *elf64_ia64_hash_table_create
+ PARAMS ((bfd *abfd));
+static struct elf64_ia64_local_hash_entry *elf64_ia64_local_hash_lookup
+ PARAMS ((struct elf64_ia64_local_hash_table *table, const char *string,
+ boolean create, boolean copy));
+static void elf64_ia64_dyn_sym_traverse
+ PARAMS ((struct elf64_ia64_link_hash_table *ia64_info,
+ boolean (*func)(struct elf64_ia64_dyn_sym_info *, PTR),
+ PTR info));
+static boolean elf64_ia64_create_dynamic_sections
+ PARAMS ((bfd *abfd, struct bfd_link_info *info));
+static struct elf64_ia64_dyn_sym_info * get_dyn_sym_info
+ PARAMS ((struct elf64_ia64_link_hash_table *ia64_info,
+ struct elf_link_hash_entry *h,
+ bfd *abfd, const Elf_Internal_Rela *rel, boolean create));
+static asection *get_got
+ PARAMS ((bfd *abfd, struct bfd_link_info *info,
+ struct elf64_ia64_link_hash_table *ia64_info));
+static asection *get_fptr
+ PARAMS ((bfd *abfd, struct bfd_link_info *info,
+ struct elf64_ia64_link_hash_table *ia64_info));
+static asection *get_pltoff
+ PARAMS ((bfd *abfd, struct bfd_link_info *info,
+ struct elf64_ia64_link_hash_table *ia64_info));
+static asection *get_reloc_section
+ PARAMS ((bfd *abfd, struct elf64_ia64_link_hash_table *ia64_info,
+ asection *sec, boolean create));
+static boolean count_dyn_reloc
+ PARAMS ((bfd *abfd, struct elf64_ia64_dyn_sym_info *dyn_i,
+ asection *srel, int type));
+static boolean elf64_ia64_check_relocs
+ PARAMS ((bfd *abfd, struct bfd_link_info *info, asection *sec,
+ const Elf_Internal_Rela *relocs));
+static boolean elf64_ia64_adjust_dynamic_symbol
+ PARAMS ((struct bfd_link_info *info, struct elf_link_hash_entry *h));
+static unsigned long global_sym_index
+ PARAMS ((struct elf_link_hash_entry *h));
+static boolean allocate_fptr
+ PARAMS ((struct elf64_ia64_dyn_sym_info *dyn_i, PTR data));
+static boolean allocate_global_data_got
+ PARAMS ((struct elf64_ia64_dyn_sym_info *dyn_i, PTR data));
+static boolean allocate_global_fptr_got
+ PARAMS ((struct elf64_ia64_dyn_sym_info *dyn_i, PTR data));
+static boolean allocate_local_got
+ PARAMS ((struct elf64_ia64_dyn_sym_info *dyn_i, PTR data));
+static boolean allocate_pltoff_entries
+ PARAMS ((struct elf64_ia64_dyn_sym_info *dyn_i, PTR data));
+static boolean allocate_plt_entries
+ PARAMS ((struct elf64_ia64_dyn_sym_info *dyn_i, PTR data));
+static boolean allocate_plt2_entries
+ PARAMS ((struct elf64_ia64_dyn_sym_info *dyn_i, PTR data));
+static boolean allocate_dynrel_entries
+ PARAMS ((struct elf64_ia64_dyn_sym_info *dyn_i, PTR data));
+static boolean elf64_ia64_size_dynamic_sections
+ PARAMS ((bfd *output_bfd, struct bfd_link_info *info));
+static bfd_reloc_status_type elf64_ia64_install_value
+ PARAMS ((bfd *abfd, bfd_byte *hit_addr, bfd_vma val, unsigned int r_type));
+static void elf64_ia64_install_dyn_reloc
+ PARAMS ((bfd *abfd, struct bfd_link_info *info, asection *sec,
+ asection *srel, bfd_vma offset, unsigned int type,
+ long dynindx, bfd_vma addend));
+static bfd_vma set_got_entry
+ PARAMS ((bfd *abfd, struct bfd_link_info *info,
+ struct elf64_ia64_dyn_sym_info *dyn_i, long dynindx,
+ bfd_vma addend, bfd_vma value, unsigned int dyn_r_type));
+static bfd_vma set_fptr_entry
+ PARAMS ((bfd *abfd, struct bfd_link_info *info,
+ struct elf64_ia64_dyn_sym_info *dyn_i,
+ bfd_vma value));
+static bfd_vma set_pltoff_entry
+ PARAMS ((bfd *abfd, struct bfd_link_info *info,
+ struct elf64_ia64_dyn_sym_info *dyn_i,
+ bfd_vma value, boolean));
+static boolean elf64_ia64_final_link
+ PARAMS ((bfd *abfd, struct bfd_link_info *info));
+static boolean elf64_ia64_relocate_section
+ PARAMS ((bfd *output_bfd, struct bfd_link_info *info, bfd *input_bfd,
+ asection *input_section, bfd_byte *contents,
+ Elf_Internal_Rela *relocs, Elf_Internal_Sym *local_syms,
+ asection **local_sections));
+static boolean elf64_ia64_finish_dynamic_symbol
+ PARAMS ((bfd *output_bfd, struct bfd_link_info *info,
+ struct elf_link_hash_entry *h, Elf_Internal_Sym *sym));
+static boolean elf64_ia64_finish_dynamic_sections
+ PARAMS ((bfd *abfd, struct bfd_link_info *info));
+static boolean elf64_ia64_set_private_flags
+ PARAMS ((bfd *abfd, flagword flags));
+static boolean elf64_ia64_copy_private_bfd_data
+ PARAMS ((bfd *ibfd, bfd *obfd));
+static boolean elf64_ia64_merge_private_bfd_data
+ PARAMS ((bfd *ibfd, bfd *obfd));
+static boolean elf64_ia64_print_private_bfd_data
+ PARAMS ((bfd *abfd, PTR ptr));
+
+
+/* ia64-specific relocation */
+
+/* Perform a relocation. Not much to do here as all the hard work is
+ done in elf64_ia64_final_link_relocate. */
+static bfd_reloc_status_type
+elf64_ia64_reloc (abfd, reloc, sym, data, input_section,
+ output_bfd, error_message)
+ bfd *abfd;
+ arelent *reloc;
+ asymbol *sym;
+ PTR data;
+ asection *input_section;
+ bfd *output_bfd;
+ char **error_message;
+{
+ if (output_bfd)
+ {
+ reloc->address += input_section->output_offset;
+ return bfd_reloc_ok;
+ }
+ *error_message = "Unsupported call to elf64_ia64_reloc";
+ return bfd_reloc_notsupported;
+}
+
+#define IA64_HOWTO(TYPE, NAME, SIZE, PCREL, IN) \
+ HOWTO (TYPE, 0, SIZE, 0, PCREL, 0, complain_overflow_signed, \
+ elf64_ia64_reloc, NAME, false, 0, 0, IN)
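
IA64_HOWTO fixes the HOWTO fields that are common to every IA-64 reloc; only
the type, name, size, PC-relative flag, and pcrel_offset flag vary per entry.
For reference, the first non-trivial entry below,
IA64_HOWTO (R_IA64_IMM14, "IMM14", 0, false, true), expands to roughly:

    HOWTO (R_IA64_IMM14,             /* type */
           0,                        /* rightshift */
           0,                        /* size (0=1, 1=2, 2=4, 4=8 bytes) */
           0,                        /* bitsize */
           false,                    /* pc_relative */
           0,                        /* bitpos */
           complain_overflow_signed, /* complain_on_overflow */
           elf64_ia64_reloc,         /* special_function */
           "IMM14",                  /* name */
           false,                    /* partial_inplace */
           0,                        /* src_mask */
           0,                        /* dst_mask */
           true)                     /* pcrel_offset */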
+
+/* This table has to be sorted by increasing values of the
+   TYPE field.  */
+static reloc_howto_type ia64_howto_table[] =
+ {
+ IA64_HOWTO (R_IA64_NONE, "NONE", 0, false, true),
+
+ IA64_HOWTO (R_IA64_IMM14, "IMM14", 0, false, true),
+ IA64_HOWTO (R_IA64_IMM22, "IMM22", 0, false, true),
+ IA64_HOWTO (R_IA64_IMM64, "IMM64", 0, false, true),
+ IA64_HOWTO (R_IA64_DIR32MSB, "DIR32MSB", 2, false, true),
+ IA64_HOWTO (R_IA64_DIR32LSB, "DIR32LSB", 2, false, true),
+ IA64_HOWTO (R_IA64_DIR64MSB, "DIR64MSB", 4, false, true),
+ IA64_HOWTO (R_IA64_DIR64LSB, "DIR64LSB", 4, false, true),
+
+ IA64_HOWTO (R_IA64_GPREL22, "GPREL22", 0, false, true),
+ IA64_HOWTO (R_IA64_GPREL64I, "GPREL64I", 0, false, true),
+ IA64_HOWTO (R_IA64_GPREL32MSB, "GPREL32MSB", 2, false, true),
+ IA64_HOWTO (R_IA64_GPREL32LSB, "GPREL32LSB", 2, false, true),
+ IA64_HOWTO (R_IA64_GPREL64MSB, "GPREL64MSB", 4, false, true),
+ IA64_HOWTO (R_IA64_GPREL64LSB, "GPREL64LSB", 4, false, true),
+
+ IA64_HOWTO (R_IA64_LTOFF22, "LTOFF22", 0, false, true),
+ IA64_HOWTO (R_IA64_LTOFF64I, "LTOFF64I", 0, false, true),
+
+ IA64_HOWTO (R_IA64_PLTOFF22, "PLTOFF22", 0, false, true),
+ IA64_HOWTO (R_IA64_PLTOFF64I, "PLTOFF64I", 0, false, true),
+ IA64_HOWTO (R_IA64_PLTOFF64MSB, "PLTOFF64MSB", 4, false, true),
+ IA64_HOWTO (R_IA64_PLTOFF64LSB, "PLTOFF64LSB", 4, false, true),
+
+ IA64_HOWTO (R_IA64_FPTR64I, "FPTR64I", 4, false, true),
+ IA64_HOWTO (R_IA64_FPTR32MSB, "FPTR32MSB", 2, false, true),
+ IA64_HOWTO (R_IA64_FPTR32LSB, "FPTR32LSB", 2, false, true),
+ IA64_HOWTO (R_IA64_FPTR64MSB, "FPTR64MSB", 4, false, true),
+ IA64_HOWTO (R_IA64_FPTR64LSB, "FPTR64LSB", 4, false, true),
+
+ IA64_HOWTO (R_IA64_PCREL21B, "PCREL21B", 0, true, true),
+ IA64_HOWTO (R_IA64_PCREL21M, "PCREL21M", 0, true, true),
+ IA64_HOWTO (R_IA64_PCREL21F, "PCREL21F", 0, true, true),
+ IA64_HOWTO (R_IA64_PCREL32MSB, "PCREL32MSB", 2, true, true),
+ IA64_HOWTO (R_IA64_PCREL32LSB, "PCREL32LSB", 2, true, true),
+ IA64_HOWTO (R_IA64_PCREL64MSB, "PCREL64MSB", 4, true, true),
+ IA64_HOWTO (R_IA64_PCREL64LSB, "PCREL64LSB", 4, true, true),
+
+ IA64_HOWTO (R_IA64_LTOFF_FPTR22, "LTOFF_FPTR22", 4, false, true),
+ IA64_HOWTO (R_IA64_LTOFF_FPTR64I, "LTOFF_FPTR64I", 4, false, true),
+ IA64_HOWTO (R_IA64_LTOFF_FPTR64MSB, "LTOFF_FPTR64MSB", 4, false, true),
+ IA64_HOWTO (R_IA64_LTOFF_FPTR64LSB, "LTOFF_FPTR64LSB", 4, false, true),
+
+ IA64_HOWTO (R_IA64_SEGBASE, "SEGBASE", 4, false, true),
+ IA64_HOWTO (R_IA64_SEGREL32MSB, "SEGREL32MSB", 2, false, true),
+ IA64_HOWTO (R_IA64_SEGREL32LSB, "SEGREL32LSB", 2, false, true),
+ IA64_HOWTO (R_IA64_SEGREL64MSB, "SEGREL64MSB", 4, false, true),
+ IA64_HOWTO (R_IA64_SEGREL64LSB, "SEGREL64LSB", 4, false, true),
+
+ IA64_HOWTO (R_IA64_SECREL32MSB, "SECREL32MSB", 2, false, true),
+ IA64_HOWTO (R_IA64_SECREL32LSB, "SECREL32LSB", 2, false, true),
+ IA64_HOWTO (R_IA64_SECREL64MSB, "SECREL64MSB", 4, false, true),
+ IA64_HOWTO (R_IA64_SECREL64LSB, "SECREL64LSB", 4, false, true),
+
+ IA64_HOWTO (R_IA64_REL32MSB, "REL32MSB", 2, false, true),
+ IA64_HOWTO (R_IA64_REL32LSB, "REL32LSB", 2, false, true),
+ IA64_HOWTO (R_IA64_REL64MSB, "REL64MSB", 4, false, true),
+ IA64_HOWTO (R_IA64_REL64LSB, "REL64LSB", 4, false, true),
+
+ IA64_HOWTO (R_IA64_LTV32MSB, "LTV32MSB", 2, false, true),
+ IA64_HOWTO (R_IA64_LTV32LSB, "LTV32LSB", 2, false, true),
+ IA64_HOWTO (R_IA64_LTV64MSB, "LTV64MSB", 4, false, true),
+ IA64_HOWTO (R_IA64_LTV64LSB, "LTV64LSB", 4, false, true),
+
+ IA64_HOWTO (R_IA64_IPLTMSB, "IPLTMSB", 4, false, true),
+ IA64_HOWTO (R_IA64_IPLTLSB, "IPLTLSB", 4, false, true),
+ IA64_HOWTO (R_IA64_EPLTMSB, "EPLTMSB", 4, false, true),
+ IA64_HOWTO (R_IA64_EPLTLSB, "EPLTLSB", 4, false, true),
+ IA64_HOWTO (R_IA64_COPY, "COPY", 4, false, true),
+ IA64_HOWTO (R_IA64_LTOFF22X, "LTOFF22X", 0, false, true),
+ IA64_HOWTO (R_IA64_LDXMOV, "LDXMOV", 0, false, true),
+
+ IA64_HOWTO (R_IA64_TPREL22, "TPREL22", 4, false, false),
+ IA64_HOWTO (R_IA64_TPREL64MSB, "TPREL64MSB", 8, false, false),
+ IA64_HOWTO (R_IA64_TPREL64LSB, "TPREL64LSB", 8, false, false),
+ IA64_HOWTO (R_IA64_LTOFF_TP22, "LTOFF_TP22", 4, false, false),
+ };
+
+static unsigned char elf_code_to_howto_index[R_IA64_MAX_RELOC_CODE + 1];
+
+/* Given an ELF reloc type, return the matching HOWTO structure. */
+
+static reloc_howto_type*
+lookup_howto (rtype)
+ unsigned int rtype;
+{
+ static int inited = 0;
+ int i;
+
+ if (!inited)
+ {
+ inited = 1;
+
+ memset (elf_code_to_howto_index, 0xff, sizeof (elf_code_to_howto_index));
+ for (i = 0; i < NELEMS (ia64_howto_table); ++i)
+ elf_code_to_howto_index[ia64_howto_table[i].type] = i;
+ }
+
+ BFD_ASSERT (rtype <= R_IA64_MAX_RELOC_CODE);
+ i = elf_code_to_howto_index[rtype];
+ if (i >= NELEMS (ia64_howto_table))
+ return 0;
+ return ia64_howto_table + i;
+}
+
+static reloc_howto_type*
+elf64_ia64_reloc_type_lookup (abfd, bfd_code)
+ bfd *abfd;
+ bfd_reloc_code_real_type bfd_code;
+{
+ unsigned int rtype;
+
+ switch (bfd_code)
+ {
+ case BFD_RELOC_NONE: rtype = R_IA64_NONE; break;
+
+ case BFD_RELOC_IA64_IMM14: rtype = R_IA64_IMM14; break;
+ case BFD_RELOC_IA64_IMM22: rtype = R_IA64_IMM22; break;
+ case BFD_RELOC_IA64_IMM64: rtype = R_IA64_IMM64; break;
+
+ case BFD_RELOC_IA64_DIR32MSB: rtype = R_IA64_DIR32MSB; break;
+ case BFD_RELOC_IA64_DIR32LSB: rtype = R_IA64_DIR32LSB; break;
+ case BFD_RELOC_IA64_DIR64MSB: rtype = R_IA64_DIR64MSB; break;
+ case BFD_RELOC_IA64_DIR64LSB: rtype = R_IA64_DIR64LSB; break;
+
+ case BFD_RELOC_IA64_GPREL22: rtype = R_IA64_GPREL22; break;
+ case BFD_RELOC_IA64_GPREL64I: rtype = R_IA64_GPREL64I; break;
+ case BFD_RELOC_IA64_GPREL32MSB: rtype = R_IA64_GPREL32MSB; break;
+ case BFD_RELOC_IA64_GPREL32LSB: rtype = R_IA64_GPREL32LSB; break;
+ case BFD_RELOC_IA64_GPREL64MSB: rtype = R_IA64_GPREL64MSB; break;
+ case BFD_RELOC_IA64_GPREL64LSB: rtype = R_IA64_GPREL64LSB; break;
+
+ case BFD_RELOC_IA64_LTOFF22: rtype = R_IA64_LTOFF22; break;
+ case BFD_RELOC_IA64_LTOFF64I: rtype = R_IA64_LTOFF64I; break;
+
+ case BFD_RELOC_IA64_PLTOFF22: rtype = R_IA64_PLTOFF22; break;
+ case BFD_RELOC_IA64_PLTOFF64I: rtype = R_IA64_PLTOFF64I; break;
+ case BFD_RELOC_IA64_PLTOFF64MSB: rtype = R_IA64_PLTOFF64MSB; break;
+ case BFD_RELOC_IA64_PLTOFF64LSB: rtype = R_IA64_PLTOFF64LSB; break;
+ case BFD_RELOC_IA64_FPTR64I: rtype = R_IA64_FPTR64I; break;
+ case BFD_RELOC_IA64_FPTR32MSB: rtype = R_IA64_FPTR32MSB; break;
+ case BFD_RELOC_IA64_FPTR32LSB: rtype = R_IA64_FPTR32LSB; break;
+ case BFD_RELOC_IA64_FPTR64MSB: rtype = R_IA64_FPTR64MSB; break;
+ case BFD_RELOC_IA64_FPTR64LSB: rtype = R_IA64_FPTR64LSB; break;
+
+ case BFD_RELOC_IA64_PCREL21B: rtype = R_IA64_PCREL21B; break;
+ case BFD_RELOC_IA64_PCREL21M: rtype = R_IA64_PCREL21M; break;
+ case BFD_RELOC_IA64_PCREL21F: rtype = R_IA64_PCREL21F; break;
+ case BFD_RELOC_IA64_PCREL32MSB: rtype = R_IA64_PCREL32MSB; break;
+ case BFD_RELOC_IA64_PCREL32LSB: rtype = R_IA64_PCREL32LSB; break;
+ case BFD_RELOC_IA64_PCREL64MSB: rtype = R_IA64_PCREL64MSB; break;
+ case BFD_RELOC_IA64_PCREL64LSB: rtype = R_IA64_PCREL64LSB; break;
+
+ case BFD_RELOC_IA64_LTOFF_FPTR22: rtype = R_IA64_LTOFF_FPTR22; break;
+ case BFD_RELOC_IA64_LTOFF_FPTR64I: rtype = R_IA64_LTOFF_FPTR64I; break;
+ case BFD_RELOC_IA64_LTOFF_FPTR64MSB: rtype = R_IA64_LTOFF_FPTR64MSB; break;
+ case BFD_RELOC_IA64_LTOFF_FPTR64LSB: rtype = R_IA64_LTOFF_FPTR64LSB; break;
+
+ case BFD_RELOC_IA64_SEGBASE: rtype = R_IA64_SEGBASE; break;
+ case BFD_RELOC_IA64_SEGREL32MSB: rtype = R_IA64_SEGREL32MSB; break;
+ case BFD_RELOC_IA64_SEGREL32LSB: rtype = R_IA64_SEGREL32LSB; break;
+ case BFD_RELOC_IA64_SEGREL64MSB: rtype = R_IA64_SEGREL64MSB; break;
+ case BFD_RELOC_IA64_SEGREL64LSB: rtype = R_IA64_SEGREL64LSB; break;
+
+ case BFD_RELOC_IA64_SECREL32MSB: rtype = R_IA64_SECREL32MSB; break;
+ case BFD_RELOC_IA64_SECREL32LSB: rtype = R_IA64_SECREL32LSB; break;
+ case BFD_RELOC_IA64_SECREL64MSB: rtype = R_IA64_SECREL64MSB; break;
+ case BFD_RELOC_IA64_SECREL64LSB: rtype = R_IA64_SECREL64LSB; break;
+
+ case BFD_RELOC_IA64_REL32MSB: rtype = R_IA64_REL32MSB; break;
+ case BFD_RELOC_IA64_REL32LSB: rtype = R_IA64_REL32LSB; break;
+ case BFD_RELOC_IA64_REL64MSB: rtype = R_IA64_REL64MSB; break;
+ case BFD_RELOC_IA64_REL64LSB: rtype = R_IA64_REL64LSB; break;
+
+ case BFD_RELOC_IA64_LTV32MSB: rtype = R_IA64_LTV32MSB; break;
+ case BFD_RELOC_IA64_LTV32LSB: rtype = R_IA64_LTV32LSB; break;
+ case BFD_RELOC_IA64_LTV64MSB: rtype = R_IA64_LTV64MSB; break;
+ case BFD_RELOC_IA64_LTV64LSB: rtype = R_IA64_LTV64LSB; break;
+
+ case BFD_RELOC_IA64_IPLTMSB: rtype = R_IA64_IPLTMSB; break;
+ case BFD_RELOC_IA64_IPLTLSB: rtype = R_IA64_IPLTLSB; break;
+ case BFD_RELOC_IA64_EPLTMSB: rtype = R_IA64_EPLTMSB; break;
+ case BFD_RELOC_IA64_EPLTLSB: rtype = R_IA64_EPLTLSB; break;
+ case BFD_RELOC_IA64_COPY: rtype = R_IA64_COPY; break;
+ case BFD_RELOC_IA64_LTOFF22X: rtype = R_IA64_LTOFF22X; break;
+ case BFD_RELOC_IA64_LDXMOV: rtype = R_IA64_LDXMOV; break;
+
+ case BFD_RELOC_IA64_TPREL22: rtype = R_IA64_TPREL22; break;
+ case BFD_RELOC_IA64_TPREL64MSB: rtype = R_IA64_TPREL64MSB; break;
+ case BFD_RELOC_IA64_TPREL64LSB: rtype = R_IA64_TPREL64LSB; break;
+ case BFD_RELOC_IA64_LTOFF_TP22: rtype = R_IA64_LTOFF_TP22; break;
+
+ default: return 0;
+ }
+ return lookup_howto (rtype);
+}
+
+/* Given an ELF reloc, return the matching HOWTO structure. */
+
+static void
+elf64_ia64_info_to_howto (abfd, bfd_reloc, elf_reloc)
+ bfd *abfd;
+ arelent *bfd_reloc;
+ Elf64_Internal_Rela *elf_reloc;
+{
+ bfd_reloc->howto = lookup_howto (ELF64_R_TYPE (elf_reloc->r_info));
+}
+
+#define PLT_HEADER_SIZE (3 * 16)
+#define PLT_MIN_ENTRY_SIZE (1 * 16)
+#define PLT_FULL_ENTRY_SIZE (2 * 16)
+#define PLT_RESERVED_WORDS 3
+
+static const bfd_byte plt_header[PLT_HEADER_SIZE] =
+{
+ 0x0b, 0x10, 0x00, 0x1c, 0x00, 0x21, /* [MMI] mov r2=r14;; */
+ 0xe0, 0x00, 0x08, 0x00, 0x48, 0x00, /* addl r14=0,r2 */
+ 0x00, 0x00, 0x04, 0x00, /* nop.i 0x0;; */
+ 0x0b, 0x80, 0x20, 0x1c, 0x18, 0x14, /* [MMI] ld8 r16=[r14],8;; */
+ 0x10, 0x41, 0x38, 0x30, 0x28, 0x00, /* ld8 r17=[r14],8 */
+ 0x00, 0x00, 0x04, 0x00, /* nop.i 0x0;; */
+ 0x11, 0x08, 0x00, 0x1c, 0x18, 0x10, /* [MIB] ld8 r1=[r14] */
+ 0x60, 0x88, 0x04, 0x80, 0x03, 0x00, /* mov b6=r17 */
+ 0x60, 0x00, 0x80, 0x00 /* br.few b6;; */
+};
+
+static const bfd_byte plt_min_entry[PLT_MIN_ENTRY_SIZE] =
+{
+ 0x11, 0x78, 0x00, 0x00, 0x00, 0x24, /* [MIB] mov r15=0 */
+ 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, /* nop.i 0x0 */
+ 0x00, 0x00, 0x00, 0x40 /* br.few 0 <PLT0>;; */
+};
+
+static const bfd_byte plt_full_entry[PLT_FULL_ENTRY_SIZE] =
+{
+ 0x0b, 0x78, 0x00, 0x02, 0x00, 0x24, /* [MMI] addl r15=0,r1;; */
+ 0x00, 0x41, 0x3c, 0x30, 0x28, 0xc0, /* ld8 r16=[r15],8 */
+ 0x01, 0x08, 0x00, 0x84, /* mov r14=r1;; */
+ 0x11, 0x08, 0x00, 0x1e, 0x18, 0x10, /* [MIB] ld8 r1=[r15] */
+ 0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /* mov b6=r16 */
+ 0x60, 0x00, 0x80, 0x00 /* br.few b6;; */
+};
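
Each template is copied into the output .plt and its zero immediates are then
patched; in plt_min_entry, the mov r15=0 receives the symbol's PLT reloc index
and the br.few 0 the displacement back to PLT0. A sketch of that step is
below; plt_index, the negative-offset displacement, and the convention that
the low bits of the address select the bundle slot are all assumptions, since
elf64_ia64_install_value's body falls outside this excerpt.

    /* Sketch only, not part of the patch.  */
    bfd_byte *loc = plt_sec->contents + dyn_i->plt_offset;

    memcpy (loc, plt_min_entry, PLT_MIN_ENTRY_SIZE);

    /* Patch "mov r15=0" with the PLT reloc index.  */
    elf64_ia64_install_value (output_bfd, loc, plt_index, R_IA64_IMM22);

    /* Patch "br.few 0" with the displacement back to PLT0.  */
    elf64_ia64_install_value (output_bfd, loc + 2, -dyn_i->plt_offset,
                              R_IA64_PCREL21B);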
+
+#define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
+
+/* Handle an IA-64 specific section when reading an object file. This
+ is called when elfcode.h finds a section with an unknown type. */
+
+static boolean
+elf64_ia64_section_from_shdr (abfd, hdr, name)
+ bfd *abfd;
+ Elf64_Internal_Shdr *hdr;
+ char *name;
+{
+ asection *newsect;
+
+ /* There ought to be a place to keep ELF backend specific flags, but
+ at the moment there isn't one. We just keep track of the
+ sections by their name, instead. Fortunately, the ABI gives
+ suggested names for all the IA-64 specific sections, so we will
+ probably get away with this. */
+ switch (hdr->sh_type)
+ {
+ case SHT_IA_64_UNWIND:
+ if (strcmp (name, ELF_STRING_ia64_unwind) != 0)
+ return false;
+ break;
+
+ case SHT_IA_64_EXT:
+ if (strcmp (name, ELF_STRING_ia64_archext) != 0)
+ return false;
+ break;
+
+ default:
+ return false;
+ }
+
+ if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name))
+ return false;
+ newsect = hdr->bfd_section;
+
+ if (hdr->sh_flags & SHF_IA_64_SHORT)
+ newsect->flags |= SEC_SMALL_DATA;
+
+ return true;
+}
+
+/* Set the correct type for an IA-64 ELF section. We do this by the
+ section name, which is a hack, but ought to work. */
+
+static boolean
+elf64_ia64_fake_sections (abfd, hdr, sec)
+ bfd *abfd;
+ Elf64_Internal_Shdr *hdr;
+ asection *sec;
+{
+ register const char *name;
+
+ name = bfd_get_section_name (abfd, sec);
+
+ if (strcmp (name, ELF_STRING_ia64_unwind) == 0)
+ hdr->sh_type = SHT_IA_64_UNWIND;
+ else if (strcmp (name, ELF_STRING_ia64_archext) == 0)
+ hdr->sh_type = SHT_IA_64_EXT;
+ else if (strcmp (name, ".reloc") == 0)
+ /*
+ * This is an ugly, but unfortunately necessary hack that is
+ * needed when producing EFI binaries on IA-64. It tells
+ * elf.c:elf_fake_sections() not to consider ".reloc" as a section
+ * containing ELF relocation info. We need this hack in order to
+ * be able to generate ELF binaries that can be translated into
+ * EFI applications (which are essentially COFF objects). Those
+ * files contain a COFF ".reloc" section inside an ELF64 object,
+ * which would normally cause BFD to segfault because it would
+ * attempt to interpret this section as containing relocation
+ * entries for section "oc". With this hack enabled, ".reloc"
+ * will be treated as a normal data section, which will avoid the
+ * segfault. However, you won't be able to create an ELF64 binary
+ * with a section named "oc" that needs relocations, but that's
+ * the kind of ugly side-effects you get when detecting section
+ * types based on their names... In practice, this limitation is
+ * unlikely to bite.
+ */
+ hdr->sh_type = SHT_PROGBITS;
+
+ if (sec->flags & SEC_SMALL_DATA)
+ hdr->sh_flags |= SHF_IA_64_SHORT;
+
+ return true;
+}
+
+/* Hook called by the linker routine which adds symbols from an object
+ file. We use it to put .comm items in .sbss, and not .bss. */
+
+static boolean
+elf64_ia64_add_symbol_hook (abfd, info, sym, namep, flagsp, secp, valp)
+ bfd *abfd;
+ struct bfd_link_info *info;
+ const Elf_Internal_Sym *sym;
+ const char **namep;
+ flagword *flagsp;
+ asection **secp;
+ bfd_vma *valp;
+{
+ if (sym->st_shndx == SHN_COMMON
+ && !info->relocateable
+ && sym->st_size <= bfd_get_gp_size (abfd))
+ {
+ /* Common symbols less than or equal to -G nn bytes are
+ automatically put into .sbss. */
+
+ asection *scomm = bfd_get_section_by_name (abfd, ".scommon");
+
+ if (scomm == NULL)
+ {
+ scomm = bfd_make_section (abfd, ".scommon");
+ if (scomm == NULL
+ || !bfd_set_section_flags (abfd, scomm, (SEC_ALLOC
+ | SEC_IS_COMMON
+ | SEC_LINKER_CREATED)))
+ return false;
+ }
+
+ *secp = scomm;
+ *valp = sym->st_size;
+ }
+
+ return true;
+}
+
+/* Return the number of additional phdrs we will need. */
+
+static int
+elf64_ia64_additional_program_headers (abfd)
+ bfd *abfd;
+{
+ asection *s;
+ int ret = 0;
+
+ /* See if we need a PT_IA_64_ARCHEXT segment. */
+ s = bfd_get_section_by_name (abfd, ELF_STRING_ia64_archext);
+ if (s && (s->flags & SEC_LOAD))
+ ++ret;
+
+ /* See if we need a PT_IA_64_UNWIND segment. */
+ s = bfd_get_section_by_name (abfd, ELF_STRING_ia64_unwind);
+ if (s && (s->flags & SEC_LOAD))
+ ++ret;
+
+ return ret;
+}
+
+static boolean
+elf64_ia64_modify_segment_map (abfd)
+ bfd *abfd;
+{
+ struct elf_segment_map *m, **pm;
+ asection *s;
+
+ /* If we need a PT_IA_64_ARCHEXT segment, it must come before
+ all PT_LOAD segments. */
+ s = bfd_get_section_by_name (abfd, ELF_STRING_ia64_archext);
+ if (s && (s->flags & SEC_LOAD))
+ {
+ for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
+ if (m->p_type == PT_IA_64_ARCHEXT)
+ break;
+ if (m == NULL)
+ {
+ m = (struct elf_segment_map *) bfd_zalloc (abfd, sizeof *m);
+ if (m == NULL)
+ return false;
+
+ m->p_type = PT_IA_64_ARCHEXT;
+ m->count = 1;
+ m->sections[0] = s;
+
+ /* We want to put it after the PHDR and INTERP segments. */
+ pm = &elf_tdata (abfd)->segment_map;
+ while (*pm != NULL
+ && ((*pm)->p_type == PT_PHDR
+ || (*pm)->p_type == PT_INTERP))
+ pm = &(*pm)->next;
+
+ m->next = *pm;
+ *pm = m;
+ }
+ }
+
+ /* Install the PT_IA_64_UNWIND segment, if needed. */
+ s = bfd_get_section_by_name (abfd, ELF_STRING_ia64_unwind);
+ if (s && (s->flags & SEC_LOAD))
+ {
+ for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
+ if (m->p_type == PT_IA_64_UNWIND)
+ break;
+ if (m == NULL)
+ {
+ m = (struct elf_segment_map *) bfd_zalloc (abfd, sizeof *m);
+ if (m == NULL)
+ return false;
+
+ m->p_type = PT_IA_64_UNWIND;
+ m->count = 1;
+ m->sections[0] = s;
+ m->next = NULL;
+
+ /* We want to put it last. */
+ pm = &elf_tdata (abfd)->segment_map;
+ while (*pm != NULL)
+ pm = &(*pm)->next;
+ *pm = m;
+ }
+ }
+
+ /* Turn on PF_IA_64_NORECOV if needed. This involves traversing all of
+ the input sections for each output section in the segment and testing
+ for SHF_IA_64_NORECOV on each. */
+ for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
+ if (m->p_type == PT_LOAD)
+ {
+ int i;
+ for (i = m->count - 1; i >= 0; --i)
+ {
+ struct bfd_link_order *order = m->sections[i]->link_order_head;
+ while (order)
+ {
+ if (order->type == bfd_indirect_link_order)
+ {
+ asection *is = order->u.indirect.section;
+ bfd_vma flags = elf_section_data(is)->this_hdr.sh_flags;
+ if (flags & SHF_IA_64_NORECOV)
+ {
+ m->p_flags |= PF_IA_64_NORECOV;
+ goto found;
+ }
+ }
+ order = order->next;
+ }
+ }
+ found:;
+ }
+
+ return true;
+}
+
+
+/* According to the Tahoe assembler spec, all labels starting with a
+ '.' are local. */
+
+static boolean
+elf64_ia64_is_local_label_name (abfd, name)
+ bfd *abfd;
+ const char *name;
+{
+ return name[0] == '.';
+}
+
+/* Should we do dynamic things to this symbol? */
+
+static boolean
+elf64_ia64_dynamic_symbol_p (h, info)
+ struct elf_link_hash_entry *h;
+ struct bfd_link_info *info;
+{
+ if (h == NULL)
+ return false;
+
+ while (h->root.type == bfd_link_hash_indirect
+ || h->root.type == bfd_link_hash_warning)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+
+ if (h->dynindx == -1)
+ return false;
+
+ if (h->root.type == bfd_link_hash_undefweak
+ || h->root.type == bfd_link_hash_defweak)
+ return true;
+
+ if ((info->shared && !info->symbolic)
+ || ((h->elf_link_hash_flags
+ & (ELF_LINK_HASH_DEF_DYNAMIC | ELF_LINK_HASH_REF_REGULAR))
+ == (ELF_LINK_HASH_DEF_DYNAMIC | ELF_LINK_HASH_REF_REGULAR)))
+ return true;
+
+ return false;
+}
+
+static boolean
+elf64_ia64_local_hash_table_init (ht, abfd, new)
+ struct elf64_ia64_local_hash_table *ht;
+ bfd *abfd;
+ new_hash_entry_func new;
+{
+ memset (ht, 0, sizeof(*ht));
+ return bfd_hash_table_init (&ht->root, new);
+}
+
+static struct bfd_hash_entry*
+elf64_ia64_new_loc_hash_entry (entry, table, string)
+ struct bfd_hash_entry *entry;
+ struct bfd_hash_table *table;
+ const char *string;
+{
+ struct elf64_ia64_local_hash_entry *ret;
+ ret = (struct elf64_ia64_local_hash_entry *) entry;
+
+ /* Allocate the structure if it has not already been allocated by a
+ subclass. */
+ if (!ret)
+ ret = bfd_hash_allocate (table, sizeof (*ret));
+
+ if (!ret)
+ return 0;
+
+ /* Initialize our local data. All zeros, and definitely easier
+ than setting a handful of bit fields. */
+ memset (ret, 0, sizeof(*ret));
+
+ /* Call the allocation method of the superclass. */
+ ret = ((struct elf64_ia64_local_hash_entry *)
+ bfd_hash_newfunc ((struct bfd_hash_entry *) ret, table, string));
+
+ return (struct bfd_hash_entry *) ret;
+}
+
+static struct bfd_hash_entry*
+elf64_ia64_new_elf_hash_entry (entry, table, string)
+ struct bfd_hash_entry *entry;
+ struct bfd_hash_table *table;
+ const char *string;
+{
+ struct elf64_ia64_link_hash_entry *ret;
+ ret = (struct elf64_ia64_link_hash_entry *) entry;
+
+ /* Allocate the structure if it has not already been allocated by a
+ subclass. */
+ if (!ret)
+ ret = bfd_hash_allocate (table, sizeof (*ret));
+
+ if (!ret)
+ return 0;
+
+ /* Initialize our local data. All zeros, and definitely easier
+ than setting a handful of bit fields. */
+ memset (ret, 0, sizeof(*ret));
+
+ /* Call the allocation method of the superclass. */
+ ret = ((struct elf64_ia64_link_hash_entry *)
+ _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
+ table, string));
+
+ return (struct bfd_hash_entry *) ret;
+}
+
+static void
+elf64_ia64_hash_copy_indirect (xdir, xind)
+ struct elf_link_hash_entry *xdir, *xind;
+{
+ struct elf64_ia64_link_hash_entry *dir, *ind;
+
+ dir = (struct elf64_ia64_link_hash_entry *)xdir;
+ ind = (struct elf64_ia64_link_hash_entry *)xind;
+
+ /* Copy down any references that we may have already seen to the
+ symbol which just became indirect. */
+
+ dir->root.elf_link_hash_flags |=
+ (ind->root.elf_link_hash_flags
+ & (ELF_LINK_HASH_REF_DYNAMIC
+ | ELF_LINK_HASH_REF_REGULAR
+ | ELF_LINK_HASH_REF_REGULAR_NONWEAK));
+
+ /* Copy over the got and plt data. This would have been done
+ by check_relocs. */
+
+ if (dir->info == NULL)
+ {
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+
+ dir->info = dyn_i = ind->info;
+ ind->info = NULL;
+
+ /* Fix up the dyn_sym_info pointers to the global symbol. */
+ for (; dyn_i; dyn_i = dyn_i->next)
+ dyn_i->h = &dir->root;
+ }
+ BFD_ASSERT (ind->info == NULL);
+
+ /* Copy over the dynindx. */
+
+ if (dir->root.dynindx == -1)
+ {
+ dir->root.dynindx = ind->root.dynindx;
+ dir->root.dynstr_index = ind->root.dynstr_index;
+ ind->root.dynindx = -1;
+ ind->root.dynstr_index = 0;
+ }
+ BFD_ASSERT (ind->root.dynindx == -1);
+}
+
+static void
+elf64_ia64_hash_hide_symbol (xh)
+ struct elf_link_hash_entry *xh;
+{
+ struct elf64_ia64_link_hash_entry *h;
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+
+ h = (struct elf64_ia64_link_hash_entry *)xh;
+
+ h->root.elf_link_hash_flags &= ~ELF_LINK_HASH_NEEDS_PLT;
+ h->root.dynindx = -1;
+
+ for (dyn_i = h->info; dyn_i; dyn_i = dyn_i->next)
+ dyn_i->want_plt2 = 0;
+}
+
+/* Create the derived linker hash table. The IA-64 ELF port uses this
+ derived hash table to keep information specific to the IA-64 ELF
+ linker (without using static variables). */
+
+static struct bfd_link_hash_table*
+elf64_ia64_hash_table_create (abfd)
+ bfd *abfd;
+{
+ struct elf64_ia64_link_hash_table *ret;
+
+ ret = bfd_alloc (abfd, sizeof (*ret));
+ if (!ret)
+ return 0;
+ if (!_bfd_elf_link_hash_table_init (&ret->root, abfd,
+ elf64_ia64_new_elf_hash_entry))
+ {
+ bfd_release (abfd, ret);
+ return 0;
+ }
+
+ if (!elf64_ia64_local_hash_table_init (&ret->loc_hash_table, abfd,
+ elf64_ia64_new_loc_hash_entry))
+ return 0;
+ return &ret->root.root;
+}
+
+/* Look up an entry in an IA-64 ELF local linker hash table. */
+
+static INLINE struct elf64_ia64_local_hash_entry *
+elf64_ia64_local_hash_lookup(table, string, create, copy)
+ struct elf64_ia64_local_hash_table *table;
+ const char *string;
+ boolean create, copy;
+{
+ return ((struct elf64_ia64_local_hash_entry *)
+ bfd_hash_lookup (&table->root, string, create, copy));
+}
+
+/* Traverse both local and global hash tables. */
+
+struct elf64_ia64_dyn_sym_traverse_data
+{
+ boolean (*func) PARAMS ((struct elf64_ia64_dyn_sym_info *, PTR));
+ PTR data;
+};
+
+static boolean
+elf64_ia64_global_dyn_sym_thunk (xentry, xdata)
+ struct bfd_hash_entry *xentry;
+ PTR xdata;
+{
+ struct elf64_ia64_link_hash_entry *entry
+ = (struct elf64_ia64_link_hash_entry *) xentry;
+ struct elf64_ia64_dyn_sym_traverse_data *data
+ = (struct elf64_ia64_dyn_sym_traverse_data *) xdata;
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+
+ for (dyn_i = entry->info; dyn_i; dyn_i = dyn_i->next)
+ if (! (*data->func) (dyn_i, data->data))
+ return false;
+ return true;
+}
+
+static boolean
+elf64_ia64_local_dyn_sym_thunk (xentry, xdata)
+ struct bfd_hash_entry *xentry;
+ PTR xdata;
+{
+ struct elf64_ia64_local_hash_entry *entry
+ = (struct elf64_ia64_local_hash_entry *) xentry;
+ struct elf64_ia64_dyn_sym_traverse_data *data
+ = (struct elf64_ia64_dyn_sym_traverse_data *) xdata;
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+
+ for (dyn_i = entry->info; dyn_i; dyn_i = dyn_i->next)
+ if (! (*data->func) (dyn_i, data->data))
+ return false;
+ return true;
+}
+
+static void
+elf64_ia64_dyn_sym_traverse (ia64_info, func, data)
+ struct elf64_ia64_link_hash_table *ia64_info;
+ boolean (*func) PARAMS ((struct elf64_ia64_dyn_sym_info *, PTR));
+ PTR data;
+{
+ struct elf64_ia64_dyn_sym_traverse_data xdata;
+
+ xdata.func = func;
+ xdata.data = data;
+
+ elf_link_hash_traverse (&ia64_info->root,
+ elf64_ia64_global_dyn_sym_thunk, &xdata);
+ bfd_hash_traverse (&ia64_info->loc_hash_table.root,
+ elf64_ia64_local_dyn_sym_thunk, &xdata);
+}
+
+static boolean
+elf64_ia64_create_dynamic_sections (abfd, info)
+ bfd *abfd;
+ struct bfd_link_info *info;
+{
+ struct elf64_ia64_link_hash_table *ia64_info;
+ struct elf_link_hash_entry *h;
+ asection *s;
+
+ if (! _bfd_elf_create_dynamic_sections (abfd, info))
+ return false;
+
+ ia64_info = elf64_ia64_hash_table (info);
+
+ ia64_info->plt_sec = bfd_get_section_by_name (abfd, ".plt");
+ ia64_info->got_sec = bfd_get_section_by_name (abfd, ".got");
+
+ {
+ flagword flags = bfd_get_section_flags (abfd, ia64_info->got_sec);
+ bfd_set_section_flags (abfd, ia64_info->got_sec, SEC_SMALL_DATA | flags);
+ }
+
+ if (!get_pltoff (abfd, info, ia64_info))
+ return false;
+
+ s = bfd_make_section(abfd, ".rela.IA_64.pltoff");
+ if (s == NULL
+ || !bfd_set_section_flags (abfd, s, (SEC_ALLOC | SEC_LOAD
+ | SEC_HAS_CONTENTS
+ | SEC_IN_MEMORY
+ | SEC_LINKER_CREATED
+ | SEC_READONLY))
+ || !bfd_set_section_alignment (abfd, s, 3))
+ return false;
+ ia64_info->rel_pltoff_sec = s;
+
+ s = bfd_make_section(abfd, ".rela.got");
+ if (s == NULL
+ || !bfd_set_section_flags (abfd, s, (SEC_ALLOC | SEC_LOAD
+ | SEC_HAS_CONTENTS
+ | SEC_IN_MEMORY
+ | SEC_LINKER_CREATED
+ | SEC_READONLY))
+ || !bfd_set_section_alignment (abfd, s, 3))
+ return false;
+ ia64_info->rel_got_sec = s;
+
+ return true;
+}
+
+/* Find and/or create a descriptor for dynamic symbol info. This will
+ vary based on global or local symbol, and the addend to the reloc. */
+
+static struct elf64_ia64_dyn_sym_info *
+get_dyn_sym_info (ia64_info, h, abfd, rel, create)
+ struct elf64_ia64_link_hash_table *ia64_info;
+ struct elf_link_hash_entry *h;
+ bfd *abfd;
+ const Elf_Internal_Rela *rel;
+ boolean create;
+{
+ struct elf64_ia64_dyn_sym_info **pp;
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ bfd_vma addend = rel ? rel->r_addend : 0;
+
+ if (h)
+ pp = &((struct elf64_ia64_link_hash_entry *)h)->info;
+ else
+ {
+ struct elf64_ia64_local_hash_entry *loc_h;
+ char *addr_name;
+ size_t len;
+
+ /* Construct a string for use in the elf64_ia64_local_hash_table.
+ The key pairs the input bfd's address with the symbol's index,
+ which together identify a local symbol uniquely. */
+
+ len = sizeof(void*)*2 + 1 + sizeof(bfd_vma)*4 + 1 + 1;
+ len += 10; /* %p slop */
+
+ addr_name = alloca (len);
+ sprintf (addr_name, "%p:%lx", (PTR) abfd,
+ (unsigned long) ELF64_R_SYM (rel->r_info));
+
+ /* Collect the canonical entry data for this address. */
+ loc_h = elf64_ia64_local_hash_lookup (&ia64_info->loc_hash_table,
+ addr_name, create, create);
+ BFD_ASSERT (loc_h);
+
+ pp = &loc_h->info;
+ }
+
+ for (dyn_i = *pp; dyn_i && dyn_i->addend != addend; dyn_i = *pp)
+ pp = &dyn_i->next;
+
+ if (dyn_i == NULL && create)
+ {
+ dyn_i = (struct elf64_ia64_dyn_sym_info *)
+ bfd_zalloc (abfd, sizeof *dyn_i);
+ *pp = dyn_i;
+ dyn_i->addend = addend;
+ }
+
+ return dyn_i;
+}
+
+static asection *
+get_got (abfd, info, ia64_info)
+ bfd *abfd;
+ struct bfd_link_info *info;
+ struct elf64_ia64_link_hash_table *ia64_info;
+{
+ asection *got;
+ bfd *dynobj;
+
+ got = ia64_info->got_sec;
+ if (!got)
+ {
+ flagword flags;
+
+ dynobj = ia64_info->root.dynobj;
+ if (!dynobj)
+ ia64_info->root.dynobj = dynobj = abfd;
+ if (!_bfd_elf_create_got_section (dynobj, info))
+ return 0;
+
+ got = bfd_get_section_by_name (dynobj, ".got");
+ BFD_ASSERT (got);
+ ia64_info->got_sec = got;
+
+ flags = bfd_get_section_flags (abfd, got);
+ bfd_set_section_flags (abfd, got, SEC_SMALL_DATA | flags);
+ }
+
+ return got;
+}
+
+/* Create function descriptor section (.opd). This section is called .opd
+ because it contains "official procedure descriptors". The "official"
+ refers to the fact that these descriptors are used when taking the address
+ of a procedure, thus ensuring a unique address for each procedure. */
+
+static asection *
+get_fptr (abfd, info, ia64_info)
+ bfd *abfd;
+ struct bfd_link_info *info;
+ struct elf64_ia64_link_hash_table *ia64_info;
+{
+ asection *fptr;
+ bfd *dynobj;
+
+ fptr = ia64_info->fptr_sec;
+ if (!fptr)
+ {
+ dynobj = ia64_info->root.dynobj;
+ if (!dynobj)
+ ia64_info->root.dynobj = dynobj = abfd;
+
+ fptr = bfd_make_section (dynobj, ".opd");
+ if (!fptr
+ || !bfd_set_section_flags (dynobj, fptr,
+ (SEC_ALLOC
+ | SEC_LOAD
+ | SEC_HAS_CONTENTS
+ | SEC_IN_MEMORY
+ | SEC_READONLY
+ | SEC_LINKER_CREATED))
+ || !bfd_set_section_alignment (abfd, fptr, 4))
+ {
+ BFD_ASSERT (0);
+ return NULL;
+ }
+
+ ia64_info->fptr_sec = fptr;
+ }
+
+ return fptr;
+}
+
+static asection *
+get_pltoff (abfd, info, ia64_info)
+ bfd *abfd;
+ struct bfd_link_info *info;
+ struct elf64_ia64_link_hash_table *ia64_info;
+{
+ asection *pltoff;
+ bfd *dynobj;
+
+ pltoff = ia64_info->pltoff_sec;
+ if (!pltoff)
+ {
+ dynobj = ia64_info->root.dynobj;
+ if (!dynobj)
+ ia64_info->root.dynobj = dynobj = abfd;
+
+ pltoff = bfd_make_section (dynobj, ELF_STRING_ia64_pltoff);
+ if (!pltoff
+ || !bfd_set_section_flags (dynobj, pltoff,
+ (SEC_ALLOC
+ | SEC_LOAD
+ | SEC_HAS_CONTENTS
+ | SEC_IN_MEMORY
+ | SEC_SMALL_DATA
+ | SEC_LINKER_CREATED))
+ || !bfd_set_section_alignment (abfd, pltoff, 4))
+ {
+ BFD_ASSERT (0);
+ return NULL;
+ }
+
+ ia64_info->pltoff_sec = pltoff;
+ }
+
+ return pltoff;
+}
+
+static asection *
+get_reloc_section (abfd, ia64_info, sec, create)
+ bfd *abfd;
+ struct elf64_ia64_link_hash_table *ia64_info;
+ asection *sec;
+ boolean create;
+{
+ const char *srel_name;
+ asection *srel;
+ bfd *dynobj;
+
+ srel_name = (bfd_elf_string_from_elf_section
+ (abfd, elf_elfheader(abfd)->e_shstrndx,
+ elf_section_data(sec)->rel_hdr.sh_name));
+ if (srel_name == NULL)
+ return NULL;
+
+ BFD_ASSERT ((strncmp (srel_name, ".rela", 5) == 0
+ && strcmp (bfd_get_section_name (abfd, sec),
+ srel_name+5) == 0)
+ || (strncmp (srel_name, ".rel", 4) == 0
+ && strcmp (bfd_get_section_name (abfd, sec),
+ srel_name+4) == 0));
+
+ dynobj = ia64_info->root.dynobj;
+ if (!dynobj)
+ ia64_info->root.dynobj = dynobj = abfd;
+
+ srel = bfd_get_section_by_name (dynobj, srel_name);
+ if (srel == NULL && create)
+ {
+ srel = bfd_make_section (dynobj, srel_name);
+ if (srel == NULL
+ || !bfd_set_section_flags (dynobj, srel,
+ (SEC_ALLOC
+ | SEC_LOAD
+ | SEC_HAS_CONTENTS
+ | SEC_IN_MEMORY
+ | SEC_LINKER_CREATED
+ | SEC_READONLY))
+ || !bfd_set_section_alignment (dynobj, srel, 3))
+ return NULL;
+ }
+
+ return srel;
+}
+
+static boolean
+count_dyn_reloc (abfd, dyn_i, srel, type)
+ bfd *abfd;
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ asection *srel;
+ int type;
+{
+ struct elf64_ia64_dyn_reloc_entry *rent;
+
+ for (rent = dyn_i->reloc_entries; rent; rent = rent->next)
+ if (rent->srel == srel && rent->type == type)
+ break;
+
+ if (!rent)
+ {
+ rent = (struct elf64_ia64_dyn_reloc_entry *)
+ bfd_alloc (abfd, sizeof (*rent));
+ if (!rent)
+ return false;
+
+ rent->next = dyn_i->reloc_entries;
+ rent->srel = srel;
+ rent->type = type;
+ rent->count = 0;
+ dyn_i->reloc_entries = rent;
+ }
+ rent->count++;
+
+ return true;
+}
+
+static boolean
+elf64_ia64_check_relocs (abfd, info, sec, relocs)
+ bfd *abfd;
+ struct bfd_link_info *info;
+ asection *sec;
+ const Elf_Internal_Rela *relocs;
+{
+ struct elf64_ia64_link_hash_table *ia64_info;
+ const Elf_Internal_Rela *relend;
+ Elf_Internal_Shdr *symtab_hdr;
+ const Elf_Internal_Rela *rel;
+ asection *got, *fptr, *srel;
+
+ if (info->relocateable)
+ return true;
+
+ symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
+ ia64_info = elf64_ia64_hash_table (info);
+
+ got = fptr = srel = NULL;
+
+ relend = relocs + sec->reloc_count;
+ for (rel = relocs; rel < relend; ++rel)
+ {
+ enum {
+ NEED_GOT = 1,
+ NEED_FPTR = 2,
+ NEED_PLTOFF = 4,
+ NEED_MIN_PLT = 8,
+ NEED_FULL_PLT = 16,
+ NEED_DYNREL = 32,
+ NEED_LTOFF_FPTR = 64
+ };
+
+ struct elf_link_hash_entry *h = NULL;
+ unsigned long r_symndx = ELF64_R_SYM (rel->r_info);
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ int need_entry;
+ boolean maybe_dynamic;
+ int dynrel_type;
+
+ if (r_symndx >= symtab_hdr->sh_info)
+ {
+ /* We're dealing with a global symbol -- find its hash entry
+ and mark it as being referenced. */
+ long indx = r_symndx - symtab_hdr->sh_info;
+ h = elf_sym_hashes (abfd)[indx];
+ while (h->root.type == bfd_link_hash_indirect
+ || h->root.type == bfd_link_hash_warning)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+
+ h->elf_link_hash_flags |= ELF_LINK_HASH_REF_REGULAR;
+ }
+
+ /* We can only get preliminary data on whether a symbol is
+ locally or externally defined, as not all of the input files
+ have yet been processed. Do something with what we know, as
+ this may help reduce memory usage and processing time later. */
+ maybe_dynamic = false;
+ if (h && ((info->shared && ! info->symbolic)
+ || ! (h->elf_link_hash_flags & ELF_LINK_HASH_DEF_REGULAR)
+ || h->root.type == bfd_link_hash_defweak))
+ maybe_dynamic = true;
+
+ need_entry = 0;
+ switch (ELF64_R_TYPE (rel->r_info))
+ {
+ case R_IA64_TPREL22:
+ case R_IA64_TPREL64MSB:
+ case R_IA64_TPREL64LSB:
+ case R_IA64_LTOFF_TP22:
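+ /* Not supported yet; fail the link rather than emit bad code. */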
+ return false;
+
+ case R_IA64_LTOFF_FPTR22:
+ case R_IA64_LTOFF_FPTR64I:
+ case R_IA64_LTOFF_FPTR64MSB:
+ case R_IA64_LTOFF_FPTR64LSB:
+ need_entry = NEED_FPTR | NEED_GOT | NEED_LTOFF_FPTR;
+ break;
+
+ case R_IA64_FPTR64I:
+ case R_IA64_FPTR32MSB:
+ case R_IA64_FPTR32LSB:
+ case R_IA64_FPTR64MSB:
+ case R_IA64_FPTR64LSB:
+ if (info->shared || h)
+ need_entry = NEED_FPTR | NEED_DYNREL;
+ else
+ need_entry = NEED_FPTR;
+ dynrel_type = R_IA64_FPTR64LSB;
+ break;
+
+ case R_IA64_LTOFF22:
+ case R_IA64_LTOFF22X:
+ case R_IA64_LTOFF64I:
+ need_entry = NEED_GOT;
+ break;
+
+ case R_IA64_PLTOFF22:
+ case R_IA64_PLTOFF64I:
+ case R_IA64_PLTOFF64MSB:
+ case R_IA64_PLTOFF64LSB:
+ need_entry = NEED_PLTOFF;
+ if (h)
+ {
+ if (maybe_dynamic)
+ need_entry |= NEED_MIN_PLT;
+ }
+ else
+ {
+ (*info->callbacks->warning)
+ (info, _("@pltoff reloc against local symbol"), 0,
+ abfd, 0, 0);
+ }
+ break;
+
+ case R_IA64_PCREL21B:
+ /* Depending on where this symbol is defined, we may or may not
+ need a full plt entry. Only skip if we know we'll not need
+ the entry -- static or symbolic, and the symbol definition
+ has already been seen. */
+ if (maybe_dynamic && rel->r_addend == 0)
+ need_entry = NEED_FULL_PLT;
+ break;
+
+ case R_IA64_IMM14:
+ case R_IA64_IMM22:
+ case R_IA64_IMM64:
+ case R_IA64_DIR32MSB:
+ case R_IA64_DIR32LSB:
+ case R_IA64_DIR64MSB:
+ case R_IA64_DIR64LSB:
+ /* Shared objects will always need at least a REL relocation. */
+ if (info->shared || maybe_dynamic)
+ need_entry = NEED_DYNREL;
+ dynrel_type = R_IA64_DIR64LSB;
+ break;
+
+ case R_IA64_PCREL32MSB:
+ case R_IA64_PCREL32LSB:
+ case R_IA64_PCREL64MSB:
+ case R_IA64_PCREL64LSB:
+ if (maybe_dynamic)
+ need_entry = NEED_DYNREL;
+ dynrel_type = R_IA64_PCREL64LSB;
+ break;
+ }
+
+ if (!need_entry)
+ continue;
+
+ if ((need_entry & NEED_FPTR) != 0
+ && rel->r_addend)
+ {
+ (*info->callbacks->warning)
+ (info, _("non-zero addend in @fptr reloc"), 0,
+ abfd, 0, 0);
+ }
+
+ dyn_i = get_dyn_sym_info (ia64_info, h, abfd, rel, true);
+
+ /* Record whether or not this is a local symbol. */
+ dyn_i->h = h;
+
+ /* Create what's needed. */
+ if (need_entry & NEED_GOT)
+ {
+ if (!got)
+ {
+ got = get_got (abfd, info, ia64_info);
+ if (!got)
+ return false;
+ }
+ dyn_i->want_got = 1;
+ }
+ if (need_entry & NEED_FPTR)
+ {
+ if (!fptr)
+ {
+ fptr = get_fptr (abfd, info, ia64_info);
+ if (!fptr)
+ return false;
+ }
+
+ /* FPTRs for shared libraries are allocated by the dynamic
+ linker. Make sure this local symbol will appear in the
+ dynamic symbol table. */
+ if (!h && info->shared)
+ {
+ if (! (_bfd_elf64_link_record_local_dynamic_symbol
+ (info, abfd, r_symndx)))
+ return false;
+ }
+
+ dyn_i->want_fptr = 1;
+ }
+ if (need_entry & NEED_LTOFF_FPTR)
+ dyn_i->want_ltoff_fptr = 1;
+ if (need_entry & (NEED_MIN_PLT | NEED_FULL_PLT))
+ {
+ if (!ia64_info->root.dynobj)
+ ia64_info->root.dynobj = abfd;
+ h->elf_link_hash_flags |= ELF_LINK_HASH_NEEDS_PLT;
+ dyn_i->want_plt = 1;
+ }
+ if (need_entry & NEED_FULL_PLT)
+ dyn_i->want_plt2 = 1;
+ if (need_entry & NEED_PLTOFF)
+ dyn_i->want_pltoff = 1;
+ if ((need_entry & NEED_DYNREL) && (sec->flags & SEC_ALLOC))
+ {
+ if (!srel)
+ {
+ srel = get_reloc_section (abfd, ia64_info, sec, true);
+ if (!srel)
+ return false;
+ }
+ if (!count_dyn_reloc (abfd, dyn_i, srel, dynrel_type))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+struct elf64_ia64_allocate_data
+{
+ struct bfd_link_info *info;
+ bfd_size_type ofs;
+};
+
+/* For cleanliness, and potentially faster dynamic loading, allocate
+ external GOT entries first. */
+
+static boolean
+allocate_global_data_got (dyn_i, data)
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ PTR data;
+{
+ struct elf64_ia64_allocate_data *x = (struct elf64_ia64_allocate_data *)data;
+
+ if (dyn_i->want_got
+ && ! dyn_i->want_fptr
+ && elf64_ia64_dynamic_symbol_p (dyn_i->h, x->info))
+ {
+ dyn_i->got_offset = x->ofs;
+ x->ofs += 8;
+ }
+ return true;
+}
+
+/* Next, allocate all the GOT entries used by LTOFF_FPTR relocs. */
+
+static boolean
+allocate_global_fptr_got (dyn_i, data)
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ PTR data;
+{
+ struct elf64_ia64_allocate_data *x = (struct elf64_ia64_allocate_data *)data;
+
+ if (dyn_i->want_got
+ && dyn_i->want_fptr
+ && elf64_ia64_dynamic_symbol_p (dyn_i->h, x->info))
+ {
+ dyn_i->got_offset = x->ofs;
+ x->ofs += 8;
+ }
+ return true;
+}
+
+/* Lastly, allocate all the GOT entries for local data. */
+
+static boolean
+allocate_local_got (dyn_i, data)
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ PTR data;
+{
+ struct elf64_ia64_allocate_data *x = (struct elf64_ia64_allocate_data *)data;
+
+ if (dyn_i->want_got
+ && ! elf64_ia64_dynamic_symbol_p (dyn_i->h, x->info))
+ {
+ dyn_i->got_offset = x->ofs;
+ x->ofs += 8;
+ }
+ return true;
+}
+
+/* Search for the index of a global symbol in its defining object file. */
+
+static unsigned long
+global_sym_index (h)
+ struct elf_link_hash_entry *h;
+{
+ struct elf_link_hash_entry **p;
+ bfd *obj;
+
+ BFD_ASSERT (h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak);
+
+ obj = h->root.u.def.section->owner;
+ for (p = elf_sym_hashes (obj); *p != h; ++p)
+ continue;
+
+ return p - elf_sym_hashes (obj) + elf_tdata (obj)->symtab_hdr.sh_info;
+}
+
+/* Allocate function descriptors. We can do these for every function
+ in a main executable that is not exported. */
+
+static boolean
+allocate_fptr (dyn_i, data)
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ PTR data;
+{
+ struct elf64_ia64_allocate_data *x = (struct elf64_ia64_allocate_data *)data;
+
+ if (dyn_i->want_fptr)
+ {
+ struct elf_link_hash_entry *h = dyn_i->h;
+
+ if (h)
+ while (h->root.type == bfd_link_hash_indirect
+ || h->root.type == bfd_link_hash_warning)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+
+ if (x->info->shared)
+ {
+ if (h && h->dynindx == -1)
+ {
+ BFD_ASSERT ((h->root.type == bfd_link_hash_defined)
+ || (h->root.type == bfd_link_hash_defweak));
+
+ if (!_bfd_elf64_link_record_local_dynamic_symbol
+ (x->info, h->root.u.def.section->owner,
+ global_sym_index (h)))
+ return false;
+ }
+
+ dyn_i->want_fptr = 0;
+ }
+ else if (h == NULL || h->dynindx == -1)
+ {
+ dyn_i->fptr_offset = x->ofs;
+ x->ofs += 16;
+ }
+ else
+ dyn_i->want_fptr = 0;
+ }
+ return true;
+}
+
+/* Allocate all the minimal PLT entries. */
+
+static boolean
+allocate_plt_entries (dyn_i, data)
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ PTR data;
+{
+ struct elf64_ia64_allocate_data *x = (struct elf64_ia64_allocate_data *)data;
+
+ if (dyn_i->want_plt)
+ {
+ struct elf_link_hash_entry *h = dyn_i->h;
+
+ if (h)
+ while (h->root.type == bfd_link_hash_indirect
+ || h->root.type == bfd_link_hash_warning)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+
+ /* ??? Versioned symbols seem to lose ELF_LINK_HASH_NEEDS_PLT. */
+ if (elf64_ia64_dynamic_symbol_p (h, x->info))
+ {
+ bfd_size_type offset = x->ofs;
+ if (offset == 0)
+ offset = PLT_HEADER_SIZE;
+ dyn_i->plt_offset = offset;
+ x->ofs = offset + PLT_MIN_ENTRY_SIZE;
+
+ dyn_i->want_pltoff = 1;
+ }
+ else
+ {
+ dyn_i->want_plt = 0;
+ dyn_i->want_plt2 = 0;
+ }
+ }
+ return true;
+}
+
+/* Allocate all the full PLT entries. */
+
+static boolean
+allocate_plt2_entries (dyn_i, data)
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ PTR data;
+{
+ struct elf64_ia64_allocate_data *x = (struct elf64_ia64_allocate_data *)data;
+
+ if (dyn_i->want_plt2)
+ {
+ struct elf_link_hash_entry *h = dyn_i->h;
+ bfd_size_type ofs = x->ofs;
+
+ dyn_i->plt2_offset = ofs;
+ x->ofs = ofs + PLT_FULL_ENTRY_SIZE;
+
+ while (h->root.type == bfd_link_hash_indirect
+ || h->root.type == bfd_link_hash_warning)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+ dyn_i->h->plt.offset = ofs;
+ }
+ return true;
+}
+
+/* Allocate all the PLTOFF entries requested by relocations and
+ plt entries. We can't share space with allocated FPTR entries,
+ because the latter are not necessarily addressable by the GP.
+ ??? Relaxation might be able to determine that they are. */
+
+static boolean
+allocate_pltoff_entries (dyn_i, data)
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ PTR data;
+{
+ struct elf64_ia64_allocate_data *x = (struct elf64_ia64_allocate_data *)data;
+
+ if (dyn_i->want_pltoff)
+ {
+ dyn_i->pltoff_offset = x->ofs;
+ x->ofs += 16;
+ }
+ return true;
+}
+
+/* Allocate dynamic relocations for those symbols that turned out
+ to be dynamic. */
+
+static boolean
+allocate_dynrel_entries (dyn_i, data)
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ PTR data;
+{
+ struct elf64_ia64_allocate_data *x = (struct elf64_ia64_allocate_data *)data;
+ struct elf64_ia64_link_hash_table *ia64_info;
+ struct elf64_ia64_dyn_reloc_entry *rent;
+ boolean dynamic_symbol, shared;
+
+ ia64_info = elf64_ia64_hash_table (x->info);
+ dynamic_symbol = elf64_ia64_dynamic_symbol_p (dyn_i->h, x->info);
+ shared = x->info->shared;
+
+ /* Take care of the normal data relocations. */
+
+ for (rent = dyn_i->reloc_entries; rent; rent = rent->next)
+ {
+ switch (rent->type)
+ {
+ case R_IA64_FPTR64LSB:
+ /* Allocate one iff !want_fptr, which by this point will
+ be true only if we're actually allocating one statically
+ in the main executable. */
+ if (dyn_i->want_fptr)
+ continue;
+ break;
+ case R_IA64_PCREL64LSB:
+ if (!dynamic_symbol)
+ continue;
+ break;
+ case R_IA64_DIR64LSB:
+ if (!dynamic_symbol && !shared)
+ continue;
+ break;
+ }
+ rent->srel->_raw_size += sizeof (Elf64_External_Rela) * rent->count;
+ }
+
+ /* Take care of the GOT and PLT relocations. */
+
+ if (((dynamic_symbol || shared) && dyn_i->want_got)
+ || (dyn_i->want_ltoff_fptr && dyn_i->h && dyn_i->h->dynindx != -1))
+ ia64_info->rel_got_sec->_raw_size += sizeof (Elf64_External_Rela);
+
+ if (dyn_i->want_pltoff)
+ {
+ bfd_size_type t = 0;
+
+ /* Dynamic symbols get one IPLT relocation. Local symbols in
+ shared libraries get two REL relocations. Local symbols in
+ main applications get nothing. */
+ if (dynamic_symbol)
+ t = sizeof (Elf64_External_Rela);
+ else if (shared)
+ t = 2 * sizeof (Elf64_External_Rela);
+
+ ia64_info->rel_pltoff_sec->_raw_size += t;
+ }
+
+ return true;
+}
+
+static boolean
+elf64_ia64_adjust_dynamic_symbol (info, h)
+ struct bfd_link_info *info;
+ struct elf_link_hash_entry *h;
+{
+ /* ??? Undefined symbols with PLT entries should be re-defined
+ to be the PLT entry. */
+
+ /* If this is a weak symbol, and there is a real definition, the
+ processor independent code will have arranged for us to see the
+ real definition first, and we can just use the same value. */
+ if (h->weakdef != NULL)
+ {
+ BFD_ASSERT (h->weakdef->root.type == bfd_link_hash_defined
+ || h->weakdef->root.type == bfd_link_hash_defweak);
+ h->root.u.def.section = h->weakdef->root.u.def.section;
+ h->root.u.def.value = h->weakdef->root.u.def.value;
+ return true;
+ }
+
+ /* If this is a reference to a symbol defined by a dynamic object which
+ is not a function, we might allocate the symbol in our .dynbss section
+ and allocate a COPY dynamic relocation.
+
+ But IA-64 code is canonically PIC, so as a rule we can avoid this sort
+ of hackery. */
+
+ return true;
+}
+
+static boolean
+elf64_ia64_size_dynamic_sections (output_bfd, info)
+ bfd *output_bfd;
+ struct bfd_link_info *info;
+{
+ struct elf64_ia64_allocate_data data;
+ struct elf64_ia64_link_hash_table *ia64_info;
+ asection *sec;
+ bfd *dynobj;
+ boolean reltext = false;
+ boolean relplt = false;
+
+ dynobj = elf_hash_table(info)->dynobj;
+ ia64_info = elf64_ia64_hash_table (info);
+ BFD_ASSERT(dynobj != NULL);
+ data.info = info;
+
+ /* Set the contents of the .interp section to the interpreter. */
+ if (ia64_info->root.dynamic_sections_created
+ && !info->shared)
+ {
+ sec = bfd_get_section_by_name (dynobj, ".interp");
+ BFD_ASSERT (sec != NULL);
+ sec->contents = (bfd_byte *) ELF_DYNAMIC_INTERPRETER;
+ sec->_raw_size = strlen (ELF_DYNAMIC_INTERPRETER) + 1;
+ }
+
+ /* DT_INIT and DT_FINI get function descriptors not raw code addresses.
+ Force their symbols to have pltoff entries so we can use those. */
+ if (ia64_info->root.dynamic_sections_created)
+ {
+ struct elf_link_hash_entry *h;
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+
+ if (info->init_function
+ && (h = elf_link_hash_lookup (elf_hash_table (info),
+ info->init_function, false,
+ false, false))
+ && (h->elf_link_hash_flags & (ELF_LINK_HASH_REF_REGULAR
+ | ELF_LINK_HASH_DEF_REGULAR)) != 0)
+ {
+ dyn_i = get_dyn_sym_info (ia64_info, h, output_bfd, NULL, true);
+ dyn_i->want_pltoff = 1;
+ }
+
+ if (info->fini_function
+ && (h = elf_link_hash_lookup (elf_hash_table (info),
+ info->fini_function, false,
+ false, false))
+ && (h->elf_link_hash_flags & (ELF_LINK_HASH_REF_REGULAR
+ | ELF_LINK_HASH_DEF_REGULAR)) != 0)
+ {
+ dyn_i = get_dyn_sym_info (ia64_info, h, output_bfd, NULL, true);
+ dyn_i->want_pltoff = 1;
+ }
+ }
+
+ /* Allocate the GOT entries. */
+
+ if (ia64_info->got_sec)
+ {
+ data.ofs = 0;
+ elf64_ia64_dyn_sym_traverse (ia64_info, allocate_global_data_got, &data);
+ elf64_ia64_dyn_sym_traverse (ia64_info, allocate_global_fptr_got, &data);
+ elf64_ia64_dyn_sym_traverse (ia64_info, allocate_local_got, &data);
+ ia64_info->got_sec->_raw_size = data.ofs;
+ }
+
+ /* Allocate the FPTR entries. */
+
+ if (ia64_info->fptr_sec)
+ {
+ data.ofs = 0;
+ elf64_ia64_dyn_sym_traverse (ia64_info, allocate_fptr, &data);
+ ia64_info->fptr_sec->_raw_size = data.ofs;
+ }
+
+ /* Now that we've seen all of the input files, we can decide which
+ symbols need plt entries. Allocate the minimal PLT entries first.
+ We do this even though dynamic_sections_created may be false, because
+ this has the side-effect of clearing want_plt and want_plt2. */
+
+ data.ofs = 0;
+ elf64_ia64_dyn_sym_traverse (ia64_info, allocate_plt_entries, &data);
+
+ ia64_info->minplt_entries = 0;
+ if (data.ofs)
+ {
+ ia64_info->minplt_entries
+ = (data.ofs - PLT_HEADER_SIZE) / PLT_MIN_ENTRY_SIZE;
+ }
+
+ /* Align the pointer for the plt2 entries. */
+ data.ofs = (data.ofs + 31) & -32;
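+ /* E.g. an offset of 0x64 rounds up to 0x80 here, keeping the full
+ plt entries 32-byte aligned. */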
+
+ elf64_ia64_dyn_sym_traverse (ia64_info, allocate_plt2_entries, &data);
+ if (data.ofs != 0)
+ {
+ BFD_ASSERT (ia64_info->root.dynamic_sections_created);
+
+ ia64_info->plt_sec->_raw_size = data.ofs;
+
+ /* If we've got a .plt, we need some extra memory for the dynamic
+ linker. We stuff those reserved words in .got.plt. */
+ sec = bfd_get_section_by_name (dynobj, ".got.plt");
+ sec->_raw_size = 8 * PLT_RESERVED_WORDS;
+ }
+
+ /* Allocate the PLTOFF entries. */
+
+ if (ia64_info->pltoff_sec)
+ {
+ data.ofs = 0;
+ elf64_ia64_dyn_sym_traverse (ia64_info, allocate_pltoff_entries, &data);
+ ia64_info->pltoff_sec->_raw_size = data.ofs;
+ }
+
+ if (ia64_info->root.dynamic_sections_created)
+ {
+ /* Allocate space for the dynamic relocations that turned out to be
+ required. */
+
+ elf64_ia64_dyn_sym_traverse (ia64_info, allocate_dynrel_entries, &data);
+ }
+
+ /* We have now determined the sizes of the various dynamic sections.
+ Allocate memory for them. */
+ for (sec = dynobj->sections; sec != NULL; sec = sec->next)
+ {
+ boolean strip;
+
+ if (!(sec->flags & SEC_LINKER_CREATED))
+ continue;
+
+ /* If we don't need this section, strip it from the output file.
+ There are several sections, primarily related to dynamic
+ linking, that must be created before the linker maps input
+ sections to output sections. The linker does that before
+ bfd_elf_size_dynamic_sections is called, and it is that
+ function which decides whether anything needs to go into
+ these sections. */
+
+ strip = (sec->_raw_size == 0);
+
+ if (sec == ia64_info->got_sec)
+ strip = false;
+ else if (sec == ia64_info->rel_got_sec)
+ {
+ if (strip)
+ ia64_info->rel_got_sec = NULL;
+ else
+ /* We use the reloc_count field as a counter if we need to
+ copy relocs into the output file. */
+ sec->reloc_count = 0;
+ }
+ else if (sec == ia64_info->fptr_sec)
+ {
+ if (strip)
+ ia64_info->fptr_sec = NULL;
+ }
+ else if (sec == ia64_info->plt_sec)
+ {
+ if (strip)
+ ia64_info->plt_sec = NULL;
+ }
+ else if (sec == ia64_info->pltoff_sec)
+ {
+ if (strip)
+ ia64_info->pltoff_sec = NULL;
+ }
+ else if (sec == ia64_info->rel_pltoff_sec)
+ {
+ if (strip)
+ ia64_info->rel_pltoff_sec = NULL;
+ else
+ {
+ relplt = true;
+ /* We use the reloc_count field as a counter if we need to
+ copy relocs into the output file. */
+ sec->reloc_count = 0;
+ }
+ }
+ else
+ {
+ const char *name;
+
+ /* It's OK to base decisions on the section name, because none
+ of the dynobj section names depend upon the input files. */
+ name = bfd_get_section_name (dynobj, sec);
+
+ if (strcmp (name, ".got.plt") == 0)
+ strip = false;
+ else if (strncmp (name, ".rel", 4) == 0)
+ {
+ if (!strip)
+ {
+ const char *outname;
+ asection *target;
+
+ /* If this relocation section applies to a read only
+ section, then we probably need a DT_TEXTREL entry. */
+ outname = bfd_get_section_name (output_bfd,
+ sec->output_section);
+ if (outname[4] == 'a')
+ outname += 5;
+ else
+ outname += 4;
+
+ target = bfd_get_section_by_name (output_bfd, outname);
+ if (target != NULL
+ && (target->flags & SEC_READONLY) != 0
+ && (target->flags & SEC_ALLOC) != 0)
+ reltext = true;
+
+ /* We use the reloc_count field as a counter if we need to
+ copy relocs into the output file. */
+ sec->reloc_count = 0;
+ }
+ }
+ else
+ continue;
+ }
+
+ if (strip)
+ _bfd_strip_section_from_output (info, sec);
+ else
+ {
+ /* Allocate memory for the section contents. */
+ sec->contents = (bfd_byte *) bfd_zalloc(dynobj, sec->_raw_size);
+ if (sec->contents == NULL && sec->_raw_size != 0)
+ return false;
+ }
+ }
+
+ if (elf_hash_table (info)->dynamic_sections_created)
+ {
+ /* Add some entries to the .dynamic section. We fill in the values
+ later (in finish_dynamic_sections) but we must add the entries now
+ so that we get the correct size for the .dynamic section. */
+
+ if (!info->shared)
+ {
+ /* The DT_DEBUG entry is filled in by the dynamic linker and used
+ by the debugger. */
+ if (!bfd_elf64_add_dynamic_entry (info, DT_DEBUG, 0))
+ return false;
+ }
+
+ if (! bfd_elf64_add_dynamic_entry (info, DT_IA_64_PLT_RESERVE, 0))
+ return false;
+ if (! bfd_elf64_add_dynamic_entry (info, DT_PLTGOT, 0))
+ return false;
+
+ if (relplt)
+ {
+ if (! bfd_elf64_add_dynamic_entry (info, DT_PLTRELSZ, 0)
+ || ! bfd_elf64_add_dynamic_entry (info, DT_PLTREL, DT_RELA)
+ || ! bfd_elf64_add_dynamic_entry (info, DT_JMPREL, 0))
+ return false;
+ }
+
+ if (! bfd_elf64_add_dynamic_entry (info, DT_RELA, 0)
+ || ! bfd_elf64_add_dynamic_entry (info, DT_RELASZ, 0)
+ || ! bfd_elf64_add_dynamic_entry (info, DT_RELAENT,
+ sizeof(Elf64_External_Rela)))
+ return false;
+
+ if (reltext)
+ {
+ if (! bfd_elf64_add_dynamic_entry (info, DT_TEXTREL, 0))
+ return false;
+ }
+ }
+
+ /* ??? Perhaps force __gp local. */
+
+ return true;
+}
+
+static bfd_reloc_status_type
+elf64_ia64_install_value (abfd, hit_addr, val, r_type)
+ bfd *abfd;
+ bfd_byte *hit_addr;
+ bfd_vma val;
+ unsigned int r_type;
+{
+ const struct ia64_operand *op;
+ int bigendian = 0, shift = 0;
+ bfd_vma t0, t1, insn, dword;
+ enum ia64_opnd opnd;
+ const char *err;
+ size_t size = 8;
+
+ opnd = IA64_OPND_NIL;
+ switch (r_type)
+ {
+ case R_IA64_NONE:
+ case R_IA64_LDXMOV:
+ return bfd_reloc_ok;
+
+ /* Instruction relocations. */
+
+ case R_IA64_IMM14: opnd = IA64_OPND_IMM14; break;
+ case R_IA64_PCREL21F: opnd = IA64_OPND_TGT25; break;
+ case R_IA64_PCREL21M: opnd = IA64_OPND_TGT25b; break;
+ case R_IA64_PCREL21B: opnd = IA64_OPND_TGT25c; break;
+
+ case R_IA64_IMM22:
+ case R_IA64_GPREL22:
+ case R_IA64_LTOFF22:
+ case R_IA64_LTOFF22X:
+ case R_IA64_PLTOFF22:
+ case R_IA64_LTOFF_FPTR22:
+ opnd = IA64_OPND_IMM22;
+ break;
+
+ case R_IA64_IMM64:
+ case R_IA64_GPREL64I:
+ case R_IA64_LTOFF64I:
+ case R_IA64_PLTOFF64I:
+ case R_IA64_FPTR64I:
+ case R_IA64_LTOFF_FPTR64I:
+ opnd = IA64_OPND_IMMU64;
+ break;
+
+ /* Data relocations. */
+
+ case R_IA64_DIR32MSB:
+ case R_IA64_GPREL32MSB:
+ case R_IA64_FPTR32MSB:
+ case R_IA64_PCREL32MSB:
+ case R_IA64_SEGREL32MSB:
+ case R_IA64_SECREL32MSB:
+ case R_IA64_LTV32MSB:
+ size = 4; bigendian = 1;
+ break;
+
+ case R_IA64_DIR32LSB:
+ case R_IA64_GPREL32LSB:
+ case R_IA64_FPTR32LSB:
+ case R_IA64_PCREL32LSB:
+ case R_IA64_SEGREL32LSB:
+ case R_IA64_SECREL32LSB:
+ case R_IA64_LTV32LSB:
+ size = 4; bigendian = 0;
+ break;
+
+ case R_IA64_DIR64MSB:
+ case R_IA64_GPREL64MSB:
+ case R_IA64_PLTOFF64MSB:
+ case R_IA64_FPTR64MSB:
+ case R_IA64_PCREL64MSB:
+ case R_IA64_LTOFF_FPTR64MSB:
+ case R_IA64_SEGREL64MSB:
+ case R_IA64_SECREL64MSB:
+ case R_IA64_LTV64MSB:
+ size = 8; bigendian = 1;
+ break;
+
+ case R_IA64_DIR64LSB:
+ case R_IA64_GPREL64LSB:
+ case R_IA64_PLTOFF64LSB:
+ case R_IA64_FPTR64LSB:
+ case R_IA64_PCREL64LSB:
+ case R_IA64_LTOFF_FPTR64LSB:
+ case R_IA64_SEGREL64LSB:
+ case R_IA64_SECREL64LSB:
+ case R_IA64_LTV64LSB:
+ size = 8; bigendian = 0;
+ break;
+
+ /* Unsupported / Dynamic relocations. */
+
+ case R_IA64_REL32MSB:
+ case R_IA64_REL32LSB:
+ case R_IA64_REL64MSB:
+ case R_IA64_REL64LSB:
+
+ case R_IA64_IPLTMSB:
+ case R_IA64_IPLTLSB:
+ case R_IA64_EPLTMSB:
+ case R_IA64_EPLTLSB:
+ case R_IA64_COPY:
+
+ case R_IA64_SEGBASE:
+
+ case R_IA64_TPREL22:
+ case R_IA64_TPREL64MSB:
+ case R_IA64_TPREL64LSB:
+ case R_IA64_LTOFF_TP22:
+
+ default:
+ return bfd_reloc_notsupported;
+ }
+
+ switch (opnd)
+ {
+ case IA64_OPND_IMMU64:
+ hit_addr -= (long) hit_addr & 0x3;
+ t0 = bfd_get_64 (abfd, hit_addr);
+ t1 = bfd_get_64 (abfd, hit_addr + 8);
+
+ /* tmpl/s: bits 0.. 4 in t0
+ slot 0: bits 5..45 in t0
+ slot 1: bits 46..63 in t0, bits 0..22 in t1
+ slot 2: bits 23..63 in t1 */
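+
+ /* Concretely: bits 22..39 of VAL (the low 18 bits of imm41) land
+ in t0 bits 46..63; bits 40..62 (the high 23 bits of imm41) land
+ in t1 bits 0..22; imm7b, imm9d, imm5c, ic and i go into slot 2
+ (t1 shifted left by 23), exactly as the masks below describe. */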
+
+ /* First, clear the bits that form the 64 bit constant. */
+ t0 &= ~(0x3ffffLL << 46);
+ t1 &= ~(0x7fffffLL
+ | (( (0x07fLL << 13) | (0x1ffLL << 27)
+ | (0x01fLL << 22) | (0x001LL << 21)
+ | (0x001LL << 36)) << 23));
+
+ t0 |= ((val >> 22) & 0x03ffffLL) << 46; /* 18 lsbs of imm41 */
+ t1 |= ((val >> 40) & 0x7fffffLL) << 0; /* 23 msbs of imm41 */
+ t1 |= ( (((val >> 0) & 0x07f) << 13) /* imm7b */
+ | (((val >> 7) & 0x1ff) << 27) /* imm9d */
+ | (((val >> 16) & 0x01f) << 22) /* imm5c */
+ | (((val >> 21) & 0x001) << 21) /* ic */
+ | (((val >> 63) & 0x001) << 36)) << 23; /* i */
+
+ bfd_put_64 (abfd, t0, hit_addr);
+ bfd_put_64 (abfd, t1, hit_addr + 8);
+ break;
+
+ default:
+ switch ((long) hit_addr & 0x3)
+ {
+ case 0: shift = 5; break;
+ case 1: shift = 14; hit_addr += 3; break;
+ case 2: shift = 23; hit_addr += 6; break;
+ case 3: return bfd_reloc_notsupported; /* shouldn't happen... */
+ }
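+ /* E.g. a reloc at bundle+2 addresses slot 2: hit_addr advances to
+ bundle+8, and the 41-bit instruction starts at bit 23 of the
+ 64-bit word read there (bundle bits 87..127). */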
+ dword = bfd_get_64 (abfd, hit_addr);
+ insn = (dword >> shift) & 0x1ffffffffffLL;
+
+ op = elf64_ia64_operands + opnd;
+ err = (*op->insert) (op, val, &insn);
+ if (err)
+ return bfd_reloc_overflow;
+
+ dword &= ~(0x1ffffffffffLL << shift);
+ dword |= (insn << shift);
+ bfd_put_64 (abfd, dword, hit_addr);
+ break;
+
+ case IA64_OPND_NIL:
+ /* A data relocation. */
+ if (bigendian)
+ {
+ if (size == 4)
+ bfd_putb32 (val, hit_addr);
+ else
+ bfd_putb64 (val, hit_addr);
+ }
+ else
+ {
+ if (size == 4)
+ bfd_putl32 (val, hit_addr);
+ else
+ bfd_putl64 (val, hit_addr);
+ }
+ break;
+ }
+
+ return bfd_reloc_ok;
+}
+
+static void
+elf64_ia64_install_dyn_reloc (abfd, info, sec, srel, offset, type,
+ dynindx, addend)
+ bfd *abfd;
+ struct bfd_link_info *info;
+ asection *sec;
+ asection *srel;
+ bfd_vma offset;
+ unsigned int type;
+ long dynindx;
+ bfd_vma addend;
+{
+ Elf_Internal_Rela outrel;
+
+ outrel.r_offset = (sec->output_section->vma
+ + sec->output_offset
+ + offset);
+
+ BFD_ASSERT (dynindx != -1);
+ outrel.r_info = ELF64_R_INFO (dynindx, type);
+ outrel.r_addend = addend;
+
+ if (elf_section_data (sec)->stab_info != NULL)
+ {
+ /* INFO may be NULL when installing linker-generated relocations,
+ as it is inconvenient to pass it around. Such relocations
+ should never land in a section with stab info, though, so
+ assert that we do have it here. */
+ BFD_ASSERT (info != NULL);
+
+ offset = (_bfd_stab_section_offset
+ (abfd, &elf_hash_table (info)->stab_info, sec,
+ &elf_section_data (sec)->stab_info, offset));
+ if (offset == (bfd_vma) -1)
+ {
+ /* Run for the hills. We shouldn't be outputting a relocation
+ for this. So do what everyone else does and output a no-op. */
+ outrel.r_info = ELF64_R_INFO (0, R_IA64_NONE);
+ outrel.r_addend = 0;
+ offset = 0;
+ }
+ outrel.r_offset = offset;
+ }
+
+ bfd_elf64_swap_reloca_out (abfd, &outrel,
+ ((Elf64_External_Rela *) srel->contents
+ + srel->reloc_count++));
+ BFD_ASSERT (sizeof(Elf64_External_Rela) * srel->reloc_count
+ <= srel->_cooked_size);
+}
+
+/* Store an entry for target address TARGET_ADDR in the linkage table
+ and return the gp-relative address of the linkage table entry. */
+
+static bfd_vma
+set_got_entry (abfd, info, dyn_i, dynindx, addend, value, dyn_r_type)
+ bfd *abfd;
+ struct bfd_link_info *info;
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ long dynindx;
+ bfd_vma addend;
+ bfd_vma value;
+ unsigned int dyn_r_type;
+{
+ struct elf64_ia64_link_hash_table *ia64_info;
+ asection *got_sec;
+
+ ia64_info = elf64_ia64_hash_table (info);
+ got_sec = ia64_info->got_sec;
+
+ BFD_ASSERT ((dyn_i->got_offset & 7) == 0);
+
+ if (! dyn_i->got_done)
+ {
+ dyn_i->got_done = true;
+
+ /* Store the target address in the linkage table entry. */
+ bfd_put_64 (abfd, value, got_sec->contents + dyn_i->got_offset);
+
+ /* Install a dynamic relocation if needed. */
+ if (info->shared
+ || elf64_ia64_dynamic_symbol_p (dyn_i->h, info)
+ || (dynindx != -1 && dyn_r_type == R_IA64_FPTR64LSB))
+ {
+ if (dynindx == -1)
+ {
+ dyn_r_type = R_IA64_REL64LSB;
+ dynindx = 0;
+ addend = value;
+ }
+
+ if (bfd_big_endian (abfd))
+ {
+ switch (dyn_r_type)
+ {
+ case R_IA64_REL64LSB:
+ dyn_r_type = R_IA64_REL64MSB;
+ break;
+ case R_IA64_DIR64LSB:
+ dyn_r_type = R_IA64_DIR64MSB;
+ break;
+ case R_IA64_FPTR64LSB:
+ dyn_r_type = R_IA64_FPTR64MSB;
+ break;
+ default:
+ BFD_ASSERT (false);
+ break;
+ }
+ }
+
+ elf64_ia64_install_dyn_reloc (abfd, NULL, got_sec,
+ ia64_info->rel_got_sec,
+ dyn_i->got_offset, dyn_r_type,
+ dynindx, addend);
+ }
+ }
+
+ /* Return the address of the linkage table entry. */
+ value = (got_sec->output_section->vma
+ + got_sec->output_offset
+ + dyn_i->got_offset);
+
+ return value;
+}
+
+/* Fill in a function descriptor consisting of the function's code
+ address and its global pointer. Return the descriptor's address. */
+
+static bfd_vma
+set_fptr_entry (abfd, info, dyn_i, value)
+ bfd *abfd;
+ struct bfd_link_info *info;
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ bfd_vma value;
+{
+ struct elf64_ia64_link_hash_table *ia64_info;
+ asection *fptr_sec;
+
+ ia64_info = elf64_ia64_hash_table (info);
+ fptr_sec = ia64_info->fptr_sec;
+
+ if (!dyn_i->fptr_done)
+ {
+ dyn_i->fptr_done = 1;
+
+ /* Fill in the function descriptor. */
+ bfd_put_64 (abfd, value, fptr_sec->contents + dyn_i->fptr_offset);
+ bfd_put_64 (abfd, _bfd_get_gp_value (abfd),
+ fptr_sec->contents + dyn_i->fptr_offset + 8);
+ }
+
+ /* Return the descriptor's address. */
+ value = (fptr_sec->output_section->vma
+ + fptr_sec->output_offset
+ + dyn_i->fptr_offset);
+
+ return value;
+}
+
+/* Fill in a PLTOFF entry consisting of the function's code address
+ and its global pointer. Return the descriptor's address. */
+
+static bfd_vma
+set_pltoff_entry (abfd, info, dyn_i, value, is_plt)
+ bfd *abfd;
+ struct bfd_link_info *info;
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ bfd_vma value;
+ boolean is_plt;
+{
+ struct elf64_ia64_link_hash_table *ia64_info;
+ asection *pltoff_sec;
+
+ ia64_info = elf64_ia64_hash_table (info);
+ pltoff_sec = ia64_info->pltoff_sec;
+
+ /* Don't do anything if this symbol uses a real PLT entry. In
+ that case, we'll fill this in during finish_dynamic_symbol. */
+ if ((! dyn_i->want_plt || is_plt)
+ && !dyn_i->pltoff_done)
+ {
+ /* Fill in the function descriptor. */
+ bfd_put_64 (abfd, value, pltoff_sec->contents + dyn_i->pltoff_offset);
+ bfd_put_64 (abfd, _bfd_get_gp_value (abfd),
+ pltoff_sec->contents + dyn_i->pltoff_offset + 8);
+
+ /* Install dynamic relocations if needed. */
+ if (!is_plt && info->shared)
+ {
+ unsigned int dyn_r_type;
+
+ if (bfd_big_endian (abfd))
+ dyn_r_type = R_IA64_REL64MSB;
+ else
+ dyn_r_type = R_IA64_REL64LSB;
+
+ elf64_ia64_install_dyn_reloc (abfd, NULL, pltoff_sec,
+ ia64_info->rel_pltoff_sec,
+ dyn_i->pltoff_offset,
+ dyn_r_type, 0, 0);
+ elf64_ia64_install_dyn_reloc (abfd, NULL, pltoff_sec,
+ ia64_info->rel_pltoff_sec,
+ dyn_i->pltoff_offset + 8,
+ dyn_r_type, 0, 0);
+ }
+
+ dyn_i->pltoff_done = 1;
+ }
+
+ /* Return the descriptor's address. */
+ value = (pltoff_sec->output_section->vma
+ + pltoff_sec->output_offset
+ + dyn_i->pltoff_offset);
+
+ return value;
+}
+
+static boolean
+elf64_ia64_final_link (abfd, info)
+ bfd *abfd;
+ struct bfd_link_info *info;
+{
+ struct elf64_ia64_link_hash_table *ia64_info;
+ ia64_info = elf64_ia64_hash_table (info);
+
+ /* Make sure we've got ourselves a nice fat __gp value. */
+ if (!info->relocateable)
+ {
+ bfd_vma min_vma = (bfd_vma) -1, max_vma = 0;
+ bfd_vma min_short_vma = min_vma, max_short_vma = 0;
+ struct elf_link_hash_entry *gp;
+ bfd_vma gp_val;
+ asection *os;
+
+ /* Find the min and max vma of all sections marked short. Also
+ collect min and max vma of any type, for use in selecting a
+ nice gp. */
+ for (os = abfd->sections; os ; os = os->next)
+ {
+ bfd_vma lo, hi;
+
+ if ((os->flags & SEC_ALLOC) == 0)
+ continue;
+
+ lo = os->vma;
+ hi = os->vma + os->_raw_size;
+ if (hi < lo)
+ hi = (bfd_vma) -1;
+
+ if (min_vma > lo)
+ min_vma = lo;
+ if (max_vma < hi)
+ max_vma = hi;
+ if (os->flags & SEC_SMALL_DATA)
+ {
+ if (min_short_vma > lo)
+ min_short_vma = lo;
+ if (max_short_vma < hi)
+ max_short_vma = hi;
+ }
+ }
+
+ /* See if the user wants to force a value. */
+ gp = elf_link_hash_lookup (elf_hash_table (info), "__gp", false,
+ false, false);
+
+ if (gp
+ && (gp->root.type == bfd_link_hash_defined
+ || gp->root.type == bfd_link_hash_defweak))
+ {
+ asection *gp_sec = gp->root.u.def.section;
+ gp_val = (gp->root.u.def.value
+ + gp_sec->output_section->vma
+ + gp_sec->output_offset);
+ }
+ else
+ {
+ /* Pick a sensible value. */
+
+ asection *got_sec = ia64_info->got_sec;
+
+ /* Start with just the address of the .got. */
+ if (got_sec)
+ gp_val = got_sec->output_section->vma;
+ else if (max_short_vma != 0)
+ gp_val = min_short_vma;
+ else
+ gp_val = min_vma;
+
+ /* If the entire image can be addressed from one gp, but the
+ choice above does not achieve that, adjust. */
+ if (max_vma - min_vma < 0x400000
+ && max_vma - gp_val <= 0x200000
+ && gp_val - min_vma > 0x200000)
+ gp_val = min_vma + 0x200000;
+ else if (max_short_vma != 0)
+ {
+ /* If we don't cover all the short data, adjust. */
+ if (max_short_vma - gp_val >= 0x200000)
+ gp_val = min_short_vma + 0x200000;
+
+ /* If we're addressing stuff past the end, adjust back. */
+ if (gp_val > max_vma)
+ gp_val = max_vma - 0x200000 + 8;
+ }
+ }
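+
+ /* The 0x200000/0x400000 constants reflect the 22-bit signed
+ immediate used for gp-relative addressing: it reaches 2MB on
+ either side of __gp, a 4MB window overall. */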
+
+ /* Validate whether all SHF_IA_64_SHORT sections are within
+ range of the chosen GP. */
+
+ if (max_short_vma != 0)
+ {
+ if (max_short_vma - min_short_vma >= 0x400000)
+ {
+ (*_bfd_error_handler)
+ (_("%s: short data segment overflowed (0x%lx >= 0x400000)"),
+ bfd_get_filename (abfd),
+ (unsigned long)(max_short_vma - min_short_vma));
+ return false;
+ }
+ else if ((gp_val > min_short_vma
+ && gp_val - min_short_vma > 0x200000)
+ || (gp_val < max_short_vma
+ && max_short_vma - gp_val >= 0x200000))
+ {
+ (*_bfd_error_handler)
+ (_("%s: __gp does not cover short data segment"),
+ bfd_get_filename (abfd));
+ return false;
+ }
+ }
+
+ _bfd_set_gp_value (abfd, gp_val);
+ }
+
+ /* Tricky bits. DT_INIT and DT_FINI use a pltoff entry, which is
+ normally initialized in finish_dynamic_sections. Except that
+ we need all non-plt pltoff entries to be initialized before
+ finish_dynamic_symbols. This is because the array of relocations
+ used for plt entries (aka DT_JMPREL) begins after all the
+ non-plt pltoff relocations. If the order were mixed up, we
+ would corrupt either the array or the array base. */
+ if (ia64_info->root.dynamic_sections_created)
+ {
+ struct elf_link_hash_entry *h;
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ bfd_vma addr;
+
+ if (info->init_function
+ && (h = elf_link_hash_lookup (elf_hash_table (info),
+ info->init_function, false,
+ false, false))
+ && (h->elf_link_hash_flags & (ELF_LINK_HASH_REF_REGULAR
+ | ELF_LINK_HASH_DEF_REGULAR)) != 0)
+ {
+ dyn_i = get_dyn_sym_info (ia64_info, h, NULL, NULL, false);
+ addr = (h->root.u.def.section->output_section->vma
+ + h->root.u.def.section->output_offset
+ + h->root.u.def.value);
+ (void) set_pltoff_entry (abfd, info, dyn_i, addr, false);
+ }
+
+ if (info->fini_function
+ && (h = elf_link_hash_lookup (elf_hash_table (info),
+ info->fini_function, false,
+ false, false))
+ && (h->elf_link_hash_flags & (ELF_LINK_HASH_REF_REGULAR
+ | ELF_LINK_HASH_DEF_REGULAR)) != 0)
+ {
+ dyn_i = get_dyn_sym_info (ia64_info, h, NULL, NULL, false);
+ addr = (h->root.u.def.section->output_section->vma
+ + h->root.u.def.section->output_offset
+ + h->root.u.def.value);
+ (void) set_pltoff_entry (abfd, info, dyn_i, addr, false);
+ }
+ }
+
+ /* Invoke the regular ELF backend linker to do all the work. */
+ return bfd_elf64_bfd_final_link (abfd, info);
+}
+
+static boolean
+elf64_ia64_relocate_section (output_bfd, info, input_bfd, input_section,
+ contents, relocs, local_syms, local_sections)
+ bfd *output_bfd;
+ struct bfd_link_info *info;
+ bfd *input_bfd;
+ asection *input_section;
+ bfd_byte *contents;
+ Elf_Internal_Rela *relocs;
+ Elf_Internal_Sym *local_syms;
+ asection **local_sections;
+{
+ struct elf64_ia64_link_hash_table *ia64_info;
+ Elf_Internal_Shdr *symtab_hdr;
+ Elf_Internal_Rela *rel;
+ Elf_Internal_Rela *relend;
+ asection *srel;
+ boolean ret_val = true; /* for non-fatal errors */
+ bfd_vma gp_val;
+
+ symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
+ ia64_info = elf64_ia64_hash_table (info);
+
+ /* Infect various flags from the input section to the output section. */
+ if (info->relocateable)
+ {
+ bfd_vma flags;
+
+ flags = elf_section_data(input_section)->this_hdr.sh_flags;
+ flags &= SHF_IA_64_NORECOV;
+
+ elf_section_data(input_section->output_section)
+ ->this_hdr.sh_flags |= flags;
+ }
+
+ gp_val = _bfd_get_gp_value (output_bfd);
+ srel = get_reloc_section (input_bfd, ia64_info, input_section, false);
+
+ rel = relocs;
+ relend = relocs + input_section->reloc_count;
+ for (; rel < relend; ++rel)
+ {
+ struct elf_link_hash_entry *h;
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ bfd_reloc_status_type r;
+ reloc_howto_type *howto;
+ unsigned long r_symndx;
+ Elf_Internal_Sym *sym;
+ unsigned int r_type;
+ bfd_vma value;
+ asection *sym_sec;
+ bfd_byte *hit_addr;
+ boolean dynamic_symbol_p;
+ boolean undef_weak_ref;
+
+ r_type = ELF64_R_TYPE (rel->r_info);
+ if (r_type > R_IA64_MAX_RELOC_CODE)
+ {
+ (*_bfd_error_handler)
+ (_("%s: unknown relocation type %d"),
+ bfd_get_filename (input_bfd), (int)r_type);
+ bfd_set_error (bfd_error_bad_value);
+ ret_val = false;
+ continue;
+ }
+ howto = lookup_howto (r_type);
+ r_symndx = ELF64_R_SYM (rel->r_info);
+
+ if (info->relocateable)
+ {
+ /* This is a relocateable link. We don't have to change
+ anything, unless the reloc is against a section symbol,
+ in which case we have to adjust according to where the
+ section symbol winds up in the output section. */
+ if (r_symndx < symtab_hdr->sh_info)
+ {
+ sym = local_syms + r_symndx;
+ if (ELF_ST_TYPE (sym->st_info) == STT_SECTION)
+ {
+ sym_sec = local_sections[r_symndx];
+ rel->r_addend += sym_sec->output_offset;
+ }
+ }
+ continue;
+ }
+
+ /* This is a final link. */
+
+ h = NULL;
+ sym = NULL;
+ sym_sec = NULL;
+ undef_weak_ref = false;
+
+ if (r_symndx < symtab_hdr->sh_info)
+ {
+ /* Reloc against local symbol. */
+ sym = local_syms + r_symndx;
+ sym_sec = local_sections[r_symndx];
+ value = (sym_sec->output_section->vma
+ + sym_sec->output_offset
+ + sym->st_value);
+ }
+ else
+ {
+ long indx;
+
+ /* Reloc against global symbol. */
+ indx = r_symndx - symtab_hdr->sh_info;
+ h = elf_sym_hashes (input_bfd)[indx];
+ while (h->root.type == bfd_link_hash_indirect
+ || h->root.type == bfd_link_hash_warning)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+
+ value = 0;
+ if (h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ {
+ sym_sec = h->root.u.def.section;
+
+ /* Detect the cases in which sym_sec->output_section is
+ expected to be NULL -- all cases in which the symbol
+ is defined in another shared module. This includes
+ PLT relocs for which we've created a PLT entry and
+ other relocs for which we're prepared to create
+ dynamic relocations. */
+ /* ??? Just accept it NULL and continue. */
+
+ if (sym_sec->output_section != NULL)
+ {
+ value = (h->root.u.def.value
+ + sym_sec->output_section->vma
+ + sym_sec->output_offset);
+ }
+ }
+ else if (h->root.type == bfd_link_hash_undefweak)
+ undef_weak_ref = true;
+ else if (info->shared && !info->symbolic && !info->no_undefined)
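+ /* Undefined symbols are permitted here; the reference will be
+ resolved at run time by the dynamic linker. */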
+ ;
+ else
+ {
+ if (! ((*info->callbacks->undefined_symbol)
+ (info, h->root.root.string, input_bfd,
+ input_section, rel->r_offset,
+ (!info->shared || info->no_undefined))))
+ return false;
+ ret_val = false;
+ continue;
+ }
+ }
+
+ hit_addr = contents + rel->r_offset;
+ value += rel->r_addend;
+ dynamic_symbol_p = elf64_ia64_dynamic_symbol_p (h, info);
+
+ switch (r_type)
+ {
+ case R_IA64_NONE:
+ case R_IA64_LDXMOV:
+ continue;
+
+ case R_IA64_IMM14:
+ case R_IA64_IMM22:
+ case R_IA64_IMM64:
+ case R_IA64_DIR32MSB:
+ case R_IA64_DIR32LSB:
+ case R_IA64_DIR64MSB:
+ case R_IA64_DIR64LSB:
+ /* Install a dynamic relocation for this reloc. */
+ if ((dynamic_symbol_p || info->shared)
+ && (input_section->flags & SEC_ALLOC) != 0)
+ {
+ unsigned int dyn_r_type;
+ long dynindx;
+
+ BFD_ASSERT (srel != NULL);
+
+ /* If we don't need dynamic symbol lookup, find a
+ matching RELATIVE relocation. */
+ dyn_r_type = r_type;
+ if (dynamic_symbol_p)
+ dynindx = h->dynindx;
+ else
+ {
+ switch (r_type)
+ {
+ case R_IA64_DIR32MSB:
+ dyn_r_type = R_IA64_REL32MSB;
+ break;
+ case R_IA64_DIR32LSB:
+ dyn_r_type = R_IA64_REL32LSB;
+ break;
+ case R_IA64_DIR64MSB:
+ dyn_r_type = R_IA64_REL64MSB;
+ break;
+ case R_IA64_DIR64LSB:
+ dyn_r_type = R_IA64_REL64LSB;
+ break;
+
+ default:
+ /* We can't represent this without a dynamic symbol.
+ Adjust the relocation to be against an output
+ section symbol; such symbols are always present
+ in the dynamic symbol table. */
+ /* ??? People shouldn't be doing non-pic code in
+ shared libraries. Hork. */
+ (*_bfd_error_handler)
+ (_("%s: linking non-pic code in a shared library"),
+ bfd_get_filename (input_bfd));
+ ret_val = false;
+ continue;
+ }
+ dynindx = 0;
+ }
+
+ elf64_ia64_install_dyn_reloc (output_bfd, info, input_section,
+ srel, rel->r_offset, dyn_r_type,
+ dynindx, rel->r_addend);
+ }
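+	  /* Worked sketch (addresses assumed): an R_IA64_DIR64LSB against
+	     a non-preemptible symbol in a shared library needs no symbol
+	     lookup at runtime, so it was rewritten to R_IA64_REL64LSB
+	     with dynindx 0 above; ld.so then merely adds the load base:
+
+	       *(Elf64_Xword *) reloc_addr = load_base + r_addend;
+
+	     Only the DIR32/DIR64 forms have REL counterparts, hence the
+	     hard error for the immediate forms.  */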
+ /* FALLTHRU */
+
+ case R_IA64_LTV32MSB:
+ case R_IA64_LTV32LSB:
+ case R_IA64_LTV64MSB:
+ case R_IA64_LTV64LSB:
+ r = elf64_ia64_install_value (output_bfd, hit_addr, value, r_type);
+ break;
+
+ case R_IA64_GPREL22:
+ case R_IA64_GPREL64I:
+ case R_IA64_GPREL32MSB:
+ case R_IA64_GPREL32LSB:
+ case R_IA64_GPREL64MSB:
+ case R_IA64_GPREL64LSB:
+ if (dynamic_symbol_p)
+ {
+ (*_bfd_error_handler)
+ (_("%s: @gprel relocation against dynamic symbol %s"),
+ bfd_get_filename (input_bfd), h->root.root.string);
+ ret_val = false;
+ continue;
+ }
+ value -= gp_val;
+ r = elf64_ia64_install_value (output_bfd, hit_addr, value, r_type);
+ break;
+
+ case R_IA64_LTOFF22:
+ case R_IA64_LTOFF22X:
+ case R_IA64_LTOFF64I:
+ dyn_i = get_dyn_sym_info (ia64_info, h, input_bfd, rel, false);
+ value = set_got_entry (input_bfd, info, dyn_i, (h ? h->dynindx : -1),
+ rel->r_addend, value, R_IA64_DIR64LSB);
+ value -= gp_val;
+ r = elf64_ia64_install_value (output_bfd, hit_addr, value, r_type);
+ break;
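+	  /* Sketch of the @ltoff arithmetic (GOT placement assumed): the
+	     GOT entry made by set_got_entry holds the symbol's full
+	     address, and the instruction immediate receives that entry's
+	     gp-relative offset:
+
+	       imm = got_entry_vma - gp_val;
+
+	     With a signed 22-bit immediate the entry must therefore lie
+	     within 2MB of gp.  */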
+
+ case R_IA64_PLTOFF22:
+ case R_IA64_PLTOFF64I:
+ case R_IA64_PLTOFF64MSB:
+ case R_IA64_PLTOFF64LSB:
+ dyn_i = get_dyn_sym_info (ia64_info, h, input_bfd, rel, false);
+ value = set_pltoff_entry (output_bfd, info, dyn_i, value, false);
+ value -= gp_val;
+ r = elf64_ia64_install_value (output_bfd, hit_addr, value, r_type);
+ break;
+
+ case R_IA64_FPTR64I:
+ case R_IA64_FPTR32MSB:
+ case R_IA64_FPTR32LSB:
+ case R_IA64_FPTR64MSB:
+ case R_IA64_FPTR64LSB:
+ dyn_i = get_dyn_sym_info (ia64_info, h, input_bfd, rel, false);
+ if (dyn_i->want_fptr)
+ {
+ if (!undef_weak_ref)
+ value = set_fptr_entry (output_bfd, info, dyn_i, value);
+ }
+ else
+ {
+ long dynindx;
+
+ /* Otherwise, we expect the dynamic linker to create
+ the entry. */
+
+ if (h)
+ {
+ if (h->dynindx != -1)
+ dynindx = h->dynindx;
+ else
+ dynindx = (_bfd_elf_link_lookup_local_dynindx
+ (info, h->root.u.def.section->owner,
+ global_sym_index (h)));
+ }
+ else
+ {
+ dynindx = (_bfd_elf_link_lookup_local_dynindx
+ (info, input_bfd, r_symndx));
+ }
+
+ elf64_ia64_install_dyn_reloc (output_bfd, info, input_section,
+ srel, rel->r_offset, r_type,
+ dynindx, rel->r_addend);
+ value = 0;
+ }
+
+ r = elf64_ia64_install_value (output_bfd, hit_addr, value, r_type);
+ break;
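+	  /* Background note: an IA-64 function pointer is the address of
+	     a two-word descriptor rather than the code address itself,
+	     roughly (sketch, field names invented):
+
+	       struct fdesc { Elf64_Addr entry_point; Elf64_Addr gp; };
+
+	     so @fptr resolves to the descriptor built by set_fptr_entry,
+	     or to one the dynamic linker creates for preemptible
+	     symbols.  */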
+
+ case R_IA64_LTOFF_FPTR22:
+ case R_IA64_LTOFF_FPTR64I:
+ case R_IA64_LTOFF_FPTR64MSB:
+ case R_IA64_LTOFF_FPTR64LSB:
+ {
+ long dynindx;
+
+ dyn_i = get_dyn_sym_info (ia64_info, h, input_bfd, rel, false);
+ if (dyn_i->want_fptr)
+ {
+		    BFD_ASSERT (h == NULL || h->dynindx == -1);
+ if (!undef_weak_ref)
+ value = set_fptr_entry (output_bfd, info, dyn_i, value);
+ dynindx = -1;
+ }
+ else
+ {
+ /* Otherwise, we expect the dynamic linker to create
+ the entry. */
+ if (h)
+ {
+ if (h->dynindx != -1)
+ dynindx = h->dynindx;
+ else
+ dynindx = (_bfd_elf_link_lookup_local_dynindx
+ (info, h->root.u.def.section->owner,
+ global_sym_index (h)));
+ }
+ else
+ dynindx = (_bfd_elf_link_lookup_local_dynindx
+ (info, input_bfd, r_symndx));
+ value = 0;
+ }
+
+ value = set_got_entry (output_bfd, info, dyn_i, dynindx,
+ rel->r_addend, value, R_IA64_FPTR64LSB);
+ value -= gp_val;
+ r = elf64_ia64_install_value (output_bfd, hit_addr, value, r_type);
+ }
+ break;
+
+ case R_IA64_PCREL32MSB:
+ case R_IA64_PCREL32LSB:
+ case R_IA64_PCREL64MSB:
+ case R_IA64_PCREL64LSB:
+ /* Install a dynamic relocation for this reloc. */
+ if (dynamic_symbol_p)
+ {
+ BFD_ASSERT (srel != NULL);
+
+ elf64_ia64_install_dyn_reloc (output_bfd, info, input_section,
+ srel, rel->r_offset, r_type,
+ h->dynindx, rel->r_addend);
+ }
+ goto finish_pcrel;
+
+ case R_IA64_PCREL21F:
+ case R_IA64_PCREL21M:
+ /* ??? These two are only used for speculation fixup code.
+ They should never be dynamic. */
+ if (dynamic_symbol_p)
+ {
+ (*_bfd_error_handler)
+ (_("%s: dynamic relocation against speculation fixup"),
+ bfd_get_filename (input_bfd));
+ ret_val = false;
+ continue;
+ }
+ if (undef_weak_ref)
+ {
+ (*_bfd_error_handler)
+ (_("%s: speculation fixup against undefined weak symbol"),
+ bfd_get_filename (input_bfd));
+ ret_val = false;
+ continue;
+ }
+ goto finish_pcrel;
+
+ case R_IA64_PCREL21B:
+ /* We should have created a PLT entry for any dynamic symbol. */
+	  /* ??? How to handle out-of-range branches, which are supposed
+ to be fixed up by a conforming linker. */
+
+ dyn_i = NULL;
+ if (h)
+ dyn_i = get_dyn_sym_info (ia64_info, h, NULL, NULL, false);
+
+ if (dyn_i && dyn_i->want_plt2)
+ {
+ /* Should have caught this earlier. */
+ BFD_ASSERT (rel->r_addend == 0);
+
+ value = (ia64_info->plt_sec->output_section->vma
+ + ia64_info->plt_sec->output_offset
+ + dyn_i->plt2_offset);
+ }
+ else
+ {
+	      /* Since there's no PLT entry, validate that this is
+ locally defined. */
+ BFD_ASSERT (undef_weak_ref || sym_sec->output_section != NULL);
+
+ /* If the symbol is undef_weak, we shouldn't be trying
+ to call it. There's every chance that we'd wind up
+ with an out-of-range fixup here. Don't bother setting
+ any value at all. */
+ if (undef_weak_ref)
+ continue;
+ }
+ goto finish_pcrel;
+
+ finish_pcrel:
+ /* Make pc-relative. */
+ value -= (input_section->output_section->vma
+ + input_section->output_offset
+ + rel->r_offset) & ~ (bfd_vma) 0x3;
+ r = elf64_ia64_install_value (output_bfd, hit_addr, value, r_type);
+ break;
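+	  /* Note on the mask above: IA-64 reloc offsets carry the
+	     instruction slot number in their low two bits, so clearing
+	     those bits yields the containing bundle's address, which is
+	     what branch displacements are measured from.  Sketch
+	     (addresses assumed): r_offset 0x1042 names slot 2 of the
+	     bundle at 0x1040.  */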
+
+ case R_IA64_SEGREL32MSB:
+ case R_IA64_SEGREL32LSB:
+ case R_IA64_SEGREL64MSB:
+ case R_IA64_SEGREL64LSB:
+ {
+ struct elf_segment_map *m;
+ Elf_Internal_Phdr *p;
+
+ /* Find the segment that contains the output_section. */
+ for (m = elf_tdata (output_bfd)->segment_map,
+ p = elf_tdata (output_bfd)->phdr;
+ m != NULL;
+ m = m->next, p++)
+ {
+ int i;
+ for (i = m->count - 1; i >= 0; i--)
+ if (m->sections[i] == sym_sec->output_section)
+ break;
+ if (i >= 0)
+ break;
+ }
+
+ if (m == NULL)
+ {
+ /* If the input section was discarded from the output, then
+ do nothing. */
+
+ if (bfd_is_abs_section (sym_sec->output_section))
+ r = bfd_reloc_ok;
+ else
+ r = bfd_reloc_notsupported;
+ }
+ else
+ {
+ /* The VMA of the segment is the vaddr of the associated
+ program header. */
+ if (value > p->p_vaddr)
+ value -= p->p_vaddr;
+ else
+ value = 0;
+ r = elf64_ia64_install_value (output_bfd, hit_addr, value,
+ r_type);
+ }
+ break;
+ }
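+	  /* E.g. (addresses assumed): a symbol at vma 0x40001000 inside a
+	     segment whose p_vaddr is 0x40000000 yields a @segrel value of
+	     0x1000.  */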
+
+ case R_IA64_SECREL32MSB:
+ case R_IA64_SECREL32LSB:
+ case R_IA64_SECREL64MSB:
+ case R_IA64_SECREL64LSB:
+ /* Make output-section relative. */
+ if (value > input_section->output_section->vma)
+ value -= input_section->output_section->vma;
+ else
+ value = 0;
+ r = elf64_ia64_install_value (output_bfd, hit_addr, value, r_type);
+ break;
+
+ case R_IA64_SEGBASE:
+
+ case R_IA64_REL32MSB:
+ case R_IA64_REL32LSB:
+ case R_IA64_REL64MSB:
+ case R_IA64_REL64LSB:
+
+ case R_IA64_IPLTMSB:
+ case R_IA64_IPLTLSB:
+ case R_IA64_EPLTMSB:
+ case R_IA64_EPLTLSB:
+ case R_IA64_COPY:
+
+ case R_IA64_TPREL22:
+ case R_IA64_TPREL64MSB:
+ case R_IA64_TPREL64LSB:
+ case R_IA64_LTOFF_TP22:
+ default:
+ r = bfd_reloc_notsupported;
+ break;
+ }
+
+ switch (r)
+ {
+ case bfd_reloc_ok:
+ break;
+
+ case bfd_reloc_undefined:
+	    /* This can happen for global pointer relative relocs if
+	       __gp is undefined.  This is a panic situation, so we
+ don't try to continue. */
+ (*info->callbacks->undefined_symbol)
+ (info, "__gp", input_bfd, input_section, rel->r_offset, 1);
+ return false;
+
+ case bfd_reloc_notsupported:
+ {
+ const char *name;
+
+ if (h)
+ name = h->root.root.string;
+ else
+ {
+ name = bfd_elf_string_from_elf_section (input_bfd,
+ symtab_hdr->sh_link,
+ sym->st_name);
+ if (name == NULL)
+ return false;
+ if (*name == '\0')
+ name = bfd_section_name (input_bfd, input_section);
+ }
+ if (!(*info->callbacks->warning) (info, _("unsupported reloc"),
+ name, input_bfd,
+ input_section, rel->r_offset))
+ return false;
+ ret_val = false;
+ }
+ break;
+
+ case bfd_reloc_dangerous:
+ case bfd_reloc_outofrange:
+ case bfd_reloc_overflow:
+ default:
+ {
+ const char *name;
+
+ if (h)
+ name = h->root.root.string;
+ else
+ {
+ name = bfd_elf_string_from_elf_section (input_bfd,
+ symtab_hdr->sh_link,
+ sym->st_name);
+ if (name == NULL)
+ return false;
+ if (*name == '\0')
+ name = bfd_section_name (input_bfd, input_section);
+ }
+ if (!(*info->callbacks->reloc_overflow) (info, name,
+ howto->name, 0,
+ input_bfd,
+ input_section,
+ rel->r_offset))
+ return false;
+ ret_val = false;
+ }
+ break;
+ }
+ }
+
+ return ret_val;
+}
+
+static boolean
+elf64_ia64_finish_dynamic_symbol (output_bfd, info, h, sym)
+ bfd *output_bfd;
+ struct bfd_link_info *info;
+ struct elf_link_hash_entry *h;
+ Elf_Internal_Sym *sym;
+{
+ struct elf64_ia64_link_hash_table *ia64_info;
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+
+ ia64_info = elf64_ia64_hash_table (info);
+ dyn_i = get_dyn_sym_info (ia64_info, h, NULL, NULL, false);
+
+ /* Fill in the PLT data, if required. */
+ if (dyn_i && dyn_i->want_plt)
+ {
+ Elf_Internal_Rela outrel;
+ bfd_byte *loc;
+ asection *plt_sec;
+ bfd_vma plt_addr, pltoff_addr, gp_val, index;
+ Elf64_External_Rela *rel;
+
+ gp_val = _bfd_get_gp_value (output_bfd);
+
+ /* Initialize the minimal PLT entry. */
+
+ index = (dyn_i->plt_offset - PLT_HEADER_SIZE) / PLT_MIN_ENTRY_SIZE;
+ plt_sec = ia64_info->plt_sec;
+ loc = plt_sec->contents + dyn_i->plt_offset;
+
+ memcpy (loc, plt_min_entry, PLT_MIN_ENTRY_SIZE);
+ elf64_ia64_install_value (output_bfd, loc, index, R_IA64_IMM22);
+ elf64_ia64_install_value (output_bfd, loc+2, -dyn_i->plt_offset,
+ R_IA64_PCREL21B);
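+      /* Sketch of the patched entry (layout as assumed here): the imm22
+	 carries this symbol's PLT index for the resolver, and the
+	 pcrel21b displacement of -plt_offset always lands back on PLT0
+	 at the start of the section, roughly:
+
+	   entry:  mov r15 = index
+		   br plt0		(entry - plt_offset == plt base)
+	 */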
+
+ plt_addr = (plt_sec->output_section->vma
+ + plt_sec->output_offset
+ + dyn_i->plt_offset);
+ pltoff_addr = set_pltoff_entry (output_bfd, info, dyn_i, plt_addr, true);
+
+ /* Initialize the FULL PLT entry, if needed. */
+ if (dyn_i->want_plt2)
+ {
+ loc = plt_sec->contents + dyn_i->plt2_offset;
+
+ memcpy (loc, plt_full_entry, PLT_FULL_ENTRY_SIZE);
+ elf64_ia64_install_value (output_bfd, loc, pltoff_addr - gp_val,
+ R_IA64_IMM22);
+
+ /* Mark the symbol as undefined, rather than as defined in the
+ plt section. Leave the value alone. */
+ /* ??? We didn't redefine it in adjust_dynamic_symbol in the
+ first place. But perhaps elflink.h did some for us. */
+ if ((h->elf_link_hash_flags & ELF_LINK_HASH_DEF_REGULAR) == 0)
+ sym->st_shndx = SHN_UNDEF;
+ }
+
+ /* Create the dynamic relocation. */
+ outrel.r_offset = pltoff_addr;
+ if (bfd_little_endian (output_bfd))
+ outrel.r_info = ELF64_R_INFO (h->dynindx, R_IA64_IPLTLSB);
+ else
+ outrel.r_info = ELF64_R_INFO (h->dynindx, R_IA64_IPLTMSB);
+ outrel.r_addend = 0;
+
+ /* This is fun. In the .IA_64.pltoff section, we've got entries
+ that correspond both to real PLT entries, and those that
+ happened to resolve to local symbols but need to be created
+ to satisfy @pltoff relocations. The .rela.IA_64.pltoff
+ relocations for the real PLT should come at the end of the
+ section, so that they can be indexed by plt entry at runtime.
+
+ We emitted all of the relocations for the non-PLT @pltoff
+ entries during relocate_section. So we can consider the
+ existing sec->reloc_count to be the base of the array of
+ PLT relocations. */
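+      /* Concrete sketch (counts assumed): if three non-PLT @pltoff
+	 relocations were emitted during relocate_section, reloc_count is
+	 3 here, and the IPLT relocation for PLT entry I lands in array
+	 slot 3 + I, the same split that the DT_JMPREL and DT_PLTRELSZ
+	 entries describe in finish_dynamic_sections.  */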
+
+ rel = (Elf64_External_Rela *)ia64_info->rel_pltoff_sec->contents;
+ rel += ia64_info->rel_pltoff_sec->reloc_count;
+
+ bfd_elf64_swap_reloca_out (output_bfd, &outrel, rel + index);
+ }
+
+ /* Mark some specially defined symbols as absolute. */
+ if (strcmp (h->root.root.string, "_DYNAMIC") == 0
+ || strcmp (h->root.root.string, "_GLOBAL_OFFSET_TABLE_") == 0
+ || strcmp (h->root.root.string, "_PROCEDURE_LINKAGE_TABLE_") == 0)
+ sym->st_shndx = SHN_ABS;
+
+ return true;
+}
+
+static boolean
+elf64_ia64_finish_dynamic_sections (abfd, info)
+ bfd *abfd;
+ struct bfd_link_info *info;
+{
+ struct elf64_ia64_link_hash_table *ia64_info;
+ bfd *dynobj;
+
+ ia64_info = elf64_ia64_hash_table (info);
+ dynobj = ia64_info->root.dynobj;
+
+ if (elf_hash_table (info)->dynamic_sections_created)
+ {
+ Elf64_External_Dyn *dyncon, *dynconend;
+ asection *sdyn, *sgotplt;
+ bfd_vma gp_val;
+
+ sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
+ sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
+ BFD_ASSERT (sdyn != NULL);
+ dyncon = (Elf64_External_Dyn *) sdyn->contents;
+ dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->_raw_size);
+
+ gp_val = _bfd_get_gp_value (abfd);
+
+ for (; dyncon < dynconend; dyncon++)
+ {
+ Elf_Internal_Dyn dyn;
+ const char *name;
+ asection *s;
+
+ bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn);
+
+ switch (dyn.d_tag)
+ {
+ case DT_PLTGOT:
+ dyn.d_un.d_ptr = gp_val;
+ break;
+
+ case DT_PLTRELSZ:
+ dyn.d_un.d_val = (ia64_info->minplt_entries
+ * sizeof (Elf64_External_Rela));
+ break;
+
+ case DT_JMPREL:
+ /* See the comment above in finish_dynamic_symbol. */
+ dyn.d_un.d_ptr = (ia64_info->rel_pltoff_sec->output_section->vma
+ + ia64_info->rel_pltoff_sec->output_offset
+ + (ia64_info->rel_pltoff_sec->reloc_count
+ * sizeof (Elf64_External_Rela)));
+ break;
+
+ case DT_IA_64_PLT_RESERVE:
+ dyn.d_un.d_ptr = (sgotplt->output_section->vma
+ + sgotplt->output_offset);
+ break;
+
+ case DT_RELASZ:
+ /* Do not have RELASZ include JMPREL. This makes things
+ easier on ld.so. This is not what the rest of BFD set up. */
+ dyn.d_un.d_val -= (ia64_info->minplt_entries
+ * sizeof (Elf64_External_Rela));
+ break;
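+	      /* Numeric sketch (counts assumed): with 10 RELA entries in
+		 total, 4 of them for the PLT, DT_RELASZ ends up covering
+		 6 * sizeof (Elf64_External_Rela) bytes and DT_PLTRELSZ
+		 the remaining 4, so ld.so never walks the JMPREL range
+		 twice.  */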
+
+ case DT_INIT:
+ case DT_FINI:
+ {
+ struct elf_link_hash_entry *h;
+ struct elf64_ia64_dyn_sym_info *dyn_i;
+ const char *which;
+
+ if (dyn.d_tag == DT_INIT)
+ which = info->init_function;
+ else
+ which = info->fini_function;
+
+ h = elf_link_hash_lookup (elf_hash_table (info), which,
+ false, false, false);
+ dyn_i = get_dyn_sym_info (ia64_info, h, NULL, NULL, false);
+ dyn.d_un.d_ptr = set_pltoff_entry (abfd, info, dyn_i,
+ dyn.d_un.d_ptr, 0);
+ }
+ }
+
+ bfd_elf64_swap_dyn_out (abfd, &dyn, dyncon);
+ }
+
+      /* Initialize the PLT0 entry.  */
+ if (ia64_info->plt_sec)
+ {
+ bfd_byte *loc = ia64_info->plt_sec->contents;
+ bfd_vma pltres;
+
+ memcpy (loc, plt_header, PLT_HEADER_SIZE);
+
+ pltres = (sgotplt->output_section->vma
+ + sgotplt->output_offset
+ - gp_val);
+
+ elf64_ia64_install_value (abfd, loc+1, pltres, R_IA64_GPREL22);
+ }
+ }
+
+ return true;
+}
+
+/* ELF file flag handling: */
+
+/* Function to keep IA-64 specific file flags. */
+static boolean
+elf64_ia64_set_private_flags (abfd, flags)
+ bfd *abfd;
+ flagword flags;
+{
+ BFD_ASSERT (!elf_flags_init (abfd)
+ || elf_elfheader (abfd)->e_flags == flags);
+
+ elf_elfheader (abfd)->e_flags = flags;
+ elf_flags_init (abfd) = true;
+ return true;
+}
+
+/* Copy backend-specific data from one object module to another.  */
+static boolean
+elf64_ia64_copy_private_bfd_data (ibfd, obfd)
+ bfd *ibfd, *obfd;
+{
+ if ( bfd_get_flavour (ibfd) != bfd_target_elf_flavour
+ || bfd_get_flavour (obfd) != bfd_target_elf_flavour)
+ return true;
+
+ BFD_ASSERT (!elf_flags_init (obfd)
+ || (elf_elfheader (obfd)->e_flags
+ == elf_elfheader (ibfd)->e_flags));
+
+ elf_elfheader (obfd)->e_flags = elf_elfheader (ibfd)->e_flags;
+ elf_flags_init (obfd) = true;
+ return true;
+}
+
+/* Merge backend-specific data from an object file into the output
+ object file when linking. */
+static boolean
+elf64_ia64_merge_private_bfd_data (ibfd, obfd)
+ bfd *ibfd, *obfd;
+{
+ flagword out_flags;
+ flagword in_flags;
+ boolean ok = true;
+
+ /* Don't even pretend to support mixed-format linking. */
+ if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour
+ || bfd_get_flavour (obfd) != bfd_target_elf_flavour)
+ return false;
+
+ in_flags = elf_elfheader (ibfd)->e_flags;
+ out_flags = elf_elfheader (obfd)->e_flags;
+
+ if (! elf_flags_init (obfd))
+ {
+ elf_flags_init (obfd) = true;
+ elf_elfheader (obfd)->e_flags = in_flags;
+
+ if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
+ && bfd_get_arch_info (obfd)->the_default)
+ {
+ return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
+ bfd_get_mach (ibfd));
+ }
+
+ return true;
+ }
+
+ /* Check flag compatibility. */
+ if (in_flags == out_flags)
+ return true;
+
+ if ((in_flags & EF_IA_64_TRAPNIL) != (out_flags & EF_IA_64_TRAPNIL))
+ {
+ (*_bfd_error_handler)
+ (_("%s: linking trap-on-NULL-dereference with non-trapping files"),
+ bfd_get_filename (ibfd));
+
+ bfd_set_error (bfd_error_bad_value);
+ ok = false;
+ }
+ if ((in_flags & EF_IA_64_BE) != (out_flags & EF_IA_64_BE))
+ {
+ (*_bfd_error_handler)
+ (_("%s: linking big-endian files with little-endian files"),
+ bfd_get_filename (ibfd));
+
+ bfd_set_error (bfd_error_bad_value);
+ ok = false;
+ }
+ if ((in_flags & EF_IA_64_ABI64) != (out_flags & EF_IA_64_ABI64))
+ {
+ (*_bfd_error_handler)
+ (_("%s: linking 64-bit files with 32-bit files"),
+ bfd_get_filename (ibfd));
+
+ bfd_set_error (bfd_error_bad_value);
+ ok = false;
+ }
+
+ return ok;
+}
+
+static boolean
+elf64_ia64_print_private_bfd_data (abfd, ptr)
+ bfd *abfd;
+ PTR ptr;
+{
+ FILE *file = (FILE *) ptr;
+ flagword flags = elf_elfheader (abfd)->e_flags;
+
+ BFD_ASSERT (abfd != NULL && ptr != NULL);
+
+ fprintf (file, "private flags = %s%s%s%s\n",
+ (flags & EF_IA_64_TRAPNIL) ? "TRAPNIL, " : "",
+ (flags & EF_IA_64_EXT) ? "EXT, " : "",
+ (flags & EF_IA_64_BE) ? "BE, " : "LE, ",
+ (flags & EF_IA_64_ABI64) ? "ABI64" : "ABI32");
+ _bfd_elf_print_private_bfd_data (abfd, ptr);
+ return true;
+}
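+
+/* For instance, a little-endian LP64 object with no other flags set
+   prints (sample output, not from a real file):
+
+     private flags = LE, ABI64
+
+   per the format string above.  */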
+
+#define TARGET_LITTLE_SYM bfd_elf64_ia64_little_vec
+#define TARGET_LITTLE_NAME "elf64-ia64-little"
+#define TARGET_BIG_SYM bfd_elf64_ia64_big_vec
+#define TARGET_BIG_NAME "elf64-ia64-big"
+#define ELF_ARCH bfd_arch_ia64
+#define ELF_MACHINE_CODE EM_IA_64
+#define ELF_MACHINE_ALT1 1999 /* EAS2.3 */
+#define ELF_MACHINE_ALT2 1998 /* EAS2.2 */
+#define ELF_MAXPAGESIZE 0x10000 /* 64KB */
+
+#define elf_backend_section_from_shdr \
+ elf64_ia64_section_from_shdr
+#define elf_backend_fake_sections \
+ elf64_ia64_fake_sections
+#define elf_backend_add_symbol_hook \
+ elf64_ia64_add_symbol_hook
+#define elf_backend_additional_program_headers \
+ elf64_ia64_additional_program_headers
+#define elf_backend_modify_segment_map \
+ elf64_ia64_modify_segment_map
+#define elf_info_to_howto \
+ elf64_ia64_info_to_howto
+
+#define bfd_elf64_bfd_reloc_type_lookup \
+ elf64_ia64_reloc_type_lookup
+#define bfd_elf64_bfd_is_local_label_name \
+ elf64_ia64_is_local_label_name
+
+/* Stuff for the BFD linker: */
+#define bfd_elf64_bfd_link_hash_table_create \
+ elf64_ia64_hash_table_create
+#define elf_backend_create_dynamic_sections \
+ elf64_ia64_create_dynamic_sections
+#define elf_backend_check_relocs \
+ elf64_ia64_check_relocs
+#define elf_backend_adjust_dynamic_symbol \
+ elf64_ia64_adjust_dynamic_symbol
+#define elf_backend_size_dynamic_sections \
+ elf64_ia64_size_dynamic_sections
+#define elf_backend_relocate_section \
+ elf64_ia64_relocate_section
+#define elf_backend_finish_dynamic_symbol \
+ elf64_ia64_finish_dynamic_symbol
+#define elf_backend_finish_dynamic_sections \
+ elf64_ia64_finish_dynamic_sections
+#define bfd_elf64_bfd_final_link \
+ elf64_ia64_final_link
+
+#define bfd_elf64_bfd_copy_private_bfd_data \
+ elf64_ia64_copy_private_bfd_data
+#define bfd_elf64_bfd_merge_private_bfd_data \
+ elf64_ia64_merge_private_bfd_data
+#define bfd_elf64_bfd_set_private_flags \
+ elf64_ia64_set_private_flags
+#define bfd_elf64_bfd_print_private_bfd_data \
+ elf64_ia64_print_private_bfd_data
+
+#define elf_backend_plt_readonly 1
+#define elf_backend_want_plt_sym 0
+#define elf_backend_plt_alignment 5
+#define elf_backend_got_header_size 0
+#define elf_backend_plt_header_size PLT_HEADER_SIZE
+#define elf_backend_want_got_plt 1
+#define elf_backend_may_use_rel_p 1
+#define elf_backend_may_use_rela_p 1
+#define elf_backend_default_use_rela_p 1
+#define elf_backend_want_dynbss 0
+#define elf_backend_copy_indirect_symbol elf64_ia64_hash_copy_indirect
+#define elf_backend_hide_symbol elf64_ia64_hash_hide_symbol
+
+#include "elf64-target.h"
diff --git a/bfd/libbfd.h b/bfd/libbfd.h
index 20ffcdc..8aef81b 100644
--- a/bfd/libbfd.h
+++ b/bfd/libbfd.h
@@ -898,6 +898,69 @@ static const char *const bfd_reloc_code_real_names[] = { "@@uninitialized@@",
"BFD_RELOC_AVR_CALL",
"BFD_RELOC_VTABLE_INHERIT",
"BFD_RELOC_VTABLE_ENTRY",
+ "BFD_RELOC_IA64_IMM14",
+ "BFD_RELOC_IA64_IMM22",
+ "BFD_RELOC_IA64_IMM64",
+ "BFD_RELOC_IA64_DIR32MSB",
+ "BFD_RELOC_IA64_DIR32LSB",
+ "BFD_RELOC_IA64_DIR64MSB",
+ "BFD_RELOC_IA64_DIR64LSB",
+ "BFD_RELOC_IA64_GPREL22",
+ "BFD_RELOC_IA64_GPREL64I",
+ "BFD_RELOC_IA64_GPREL32MSB",
+ "BFD_RELOC_IA64_GPREL32LSB",
+ "BFD_RELOC_IA64_GPREL64MSB",
+ "BFD_RELOC_IA64_GPREL64LSB",
+ "BFD_RELOC_IA64_LTOFF22",
+ "BFD_RELOC_IA64_LTOFF64I",
+ "BFD_RELOC_IA64_PLTOFF22",
+ "BFD_RELOC_IA64_PLTOFF64I",
+ "BFD_RELOC_IA64_PLTOFF64MSB",
+ "BFD_RELOC_IA64_PLTOFF64LSB",
+ "BFD_RELOC_IA64_FPTR64I",
+ "BFD_RELOC_IA64_FPTR32MSB",
+ "BFD_RELOC_IA64_FPTR32LSB",
+ "BFD_RELOC_IA64_FPTR64MSB",
+ "BFD_RELOC_IA64_FPTR64LSB",
+ "BFD_RELOC_IA64_PCREL21B",
+ "BFD_RELOC_IA64_PCREL21M",
+ "BFD_RELOC_IA64_PCREL21F",
+ "BFD_RELOC_IA64_PCREL32MSB",
+ "BFD_RELOC_IA64_PCREL32LSB",
+ "BFD_RELOC_IA64_PCREL64MSB",
+ "BFD_RELOC_IA64_PCREL64LSB",
+ "BFD_RELOC_IA64_LTOFF_FPTR22",
+ "BFD_RELOC_IA64_LTOFF_FPTR64I",
+ "BFD_RELOC_IA64_LTOFF_FPTR64MSB",
+ "BFD_RELOC_IA64_LTOFF_FPTR64LSB",
+ "BFD_RELOC_IA64_SEGBASE",
+ "BFD_RELOC_IA64_SEGREL32MSB",
+ "BFD_RELOC_IA64_SEGREL32LSB",
+ "BFD_RELOC_IA64_SEGREL64MSB",
+ "BFD_RELOC_IA64_SEGREL64LSB",
+ "BFD_RELOC_IA64_SECREL32MSB",
+ "BFD_RELOC_IA64_SECREL32LSB",
+ "BFD_RELOC_IA64_SECREL64MSB",
+ "BFD_RELOC_IA64_SECREL64LSB",
+ "BFD_RELOC_IA64_REL32MSB",
+ "BFD_RELOC_IA64_REL32LSB",
+ "BFD_RELOC_IA64_REL64MSB",
+ "BFD_RELOC_IA64_REL64LSB",
+ "BFD_RELOC_IA64_LTV32MSB",
+ "BFD_RELOC_IA64_LTV32LSB",
+ "BFD_RELOC_IA64_LTV64MSB",
+ "BFD_RELOC_IA64_LTV64LSB",
+ "BFD_RELOC_IA64_IPLTMSB",
+ "BFD_RELOC_IA64_IPLTLSB",
+ "BFD_RELOC_IA64_EPLTMSB",
+ "BFD_RELOC_IA64_EPLTLSB",
+ "BFD_RELOC_IA64_COPY",
+ "BFD_RELOC_IA64_TPREL22",
+ "BFD_RELOC_IA64_TPREL64MSB",
+ "BFD_RELOC_IA64_TPREL64LSB",
+ "BFD_RELOC_IA64_LTOFF_TP22",
+ "BFD_RELOC_IA64_LTOFF22X",
+ "BFD_RELOC_IA64_LDXMOV",
"@@overflow: BFD_RELOC_UNUSED@@",
};
#endif
diff --git a/bfd/reloc.c b/bfd/reloc.c
index f312ffc..12f07b6 100644
--- a/bfd/reloc.c
+++ b/bfd/reloc.c
@@ -2749,6 +2749,134 @@ ENUMDOC
is stored in the reloc's addend. For Rel hosts, we are forced to put
this offset in the reloc's section offset.
+ENUM
+ BFD_RELOC_IA64_IMM14
+ENUMX
+ BFD_RELOC_IA64_IMM22
+ENUMX
+ BFD_RELOC_IA64_IMM64
+ENUMX
+ BFD_RELOC_IA64_DIR32MSB
+ENUMX
+ BFD_RELOC_IA64_DIR32LSB
+ENUMX
+ BFD_RELOC_IA64_DIR64MSB
+ENUMX
+ BFD_RELOC_IA64_DIR64LSB
+ENUMX
+ BFD_RELOC_IA64_GPREL22
+ENUMX
+ BFD_RELOC_IA64_GPREL64I
+ENUMX
+ BFD_RELOC_IA64_GPREL32MSB
+ENUMX
+ BFD_RELOC_IA64_GPREL32LSB
+ENUMX
+ BFD_RELOC_IA64_GPREL64MSB
+ENUMX
+ BFD_RELOC_IA64_GPREL64LSB
+ENUMX
+ BFD_RELOC_IA64_LTOFF22
+ENUMX
+ BFD_RELOC_IA64_LTOFF64I
+ENUMX
+ BFD_RELOC_IA64_PLTOFF22
+ENUMX
+ BFD_RELOC_IA64_PLTOFF64I
+ENUMX
+ BFD_RELOC_IA64_PLTOFF64MSB
+ENUMX
+ BFD_RELOC_IA64_PLTOFF64LSB
+ENUMX
+ BFD_RELOC_IA64_FPTR64I
+ENUMX
+ BFD_RELOC_IA64_FPTR32MSB
+ENUMX
+ BFD_RELOC_IA64_FPTR32LSB
+ENUMX
+ BFD_RELOC_IA64_FPTR64MSB
+ENUMX
+ BFD_RELOC_IA64_FPTR64LSB
+ENUMX
+ BFD_RELOC_IA64_PCREL21B
+ENUMX
+ BFD_RELOC_IA64_PCREL21M
+ENUMX
+ BFD_RELOC_IA64_PCREL21F
+ENUMX
+ BFD_RELOC_IA64_PCREL32MSB
+ENUMX
+ BFD_RELOC_IA64_PCREL32LSB
+ENUMX
+ BFD_RELOC_IA64_PCREL64MSB
+ENUMX
+ BFD_RELOC_IA64_PCREL64LSB
+ENUMX
+ BFD_RELOC_IA64_LTOFF_FPTR22
+ENUMX
+ BFD_RELOC_IA64_LTOFF_FPTR64I
+ENUMX
+ BFD_RELOC_IA64_LTOFF_FPTR64MSB
+ENUMX
+ BFD_RELOC_IA64_LTOFF_FPTR64LSB
+ENUMX
+ BFD_RELOC_IA64_SEGBASE
+ENUMX
+ BFD_RELOC_IA64_SEGREL32MSB
+ENUMX
+ BFD_RELOC_IA64_SEGREL32LSB
+ENUMX
+ BFD_RELOC_IA64_SEGREL64MSB
+ENUMX
+ BFD_RELOC_IA64_SEGREL64LSB
+ENUMX
+ BFD_RELOC_IA64_SECREL32MSB
+ENUMX
+ BFD_RELOC_IA64_SECREL32LSB
+ENUMX
+ BFD_RELOC_IA64_SECREL64MSB
+ENUMX
+ BFD_RELOC_IA64_SECREL64LSB
+ENUMX
+ BFD_RELOC_IA64_REL32MSB
+ENUMX
+ BFD_RELOC_IA64_REL32LSB
+ENUMX
+ BFD_RELOC_IA64_REL64MSB
+ENUMX
+ BFD_RELOC_IA64_REL64LSB
+ENUMX
+ BFD_RELOC_IA64_LTV32MSB
+ENUMX
+ BFD_RELOC_IA64_LTV32LSB
+ENUMX
+ BFD_RELOC_IA64_LTV64MSB
+ENUMX
+ BFD_RELOC_IA64_LTV64LSB
+ENUMX
+ BFD_RELOC_IA64_IPLTMSB
+ENUMX
+ BFD_RELOC_IA64_IPLTLSB
+ENUMX
+ BFD_RELOC_IA64_EPLTMSB
+ENUMX
+ BFD_RELOC_IA64_EPLTLSB
+ENUMX
+ BFD_RELOC_IA64_COPY
+ENUMX
+ BFD_RELOC_IA64_TPREL22
+ENUMX
+ BFD_RELOC_IA64_TPREL64MSB
+ENUMX
+ BFD_RELOC_IA64_TPREL64LSB
+ENUMX
+ BFD_RELOC_IA64_LTOFF_TP22
+ENUMX
+ BFD_RELOC_IA64_LTOFF22X
+ENUMX
+ BFD_RELOC_IA64_LDXMOV
+ENUMDOC
+ Intel IA-64 relocations.
ENDSENUM
BFD_RELOC_UNUSED
CODE_FRAGMENT
diff --git a/bfd/targets.c b/bfd/targets.c
index 8217d56..b92a1c3 100644
--- a/bfd/targets.c
+++ b/bfd/targets.c
@@ -508,6 +508,8 @@ extern const bfd_target b_out_vec_little_host;
extern const bfd_target bfd_efi_app_ia32_vec;
extern const bfd_target bfd_efi_app_ia64_vec;
extern const bfd_target bfd_elf64_alpha_vec;
+extern const bfd_target bfd_elf64_ia64_little_vec;
+extern const bfd_target bfd_elf64_ia64_big_vec;
extern const bfd_target bfd_elf32_avr_vec;
extern const bfd_target bfd_elf32_bigarc_vec;
extern const bfd_target bfd_elf32_bigarm_vec;
@@ -703,6 +705,8 @@ const bfd_target * const bfd_target_vector[] = {
&bfd_elf32_big_generic_vec,
#ifdef BFD64
&bfd_elf64_alpha_vec,
+ &bfd_elf64_ia64_little_vec,
+ &bfd_elf64_ia64_big_vec,
#endif
&bfd_elf32_avr_vec,
&bfd_elf32_bigarc_vec,