author    Yufeng Zhang <yufeng.zhang@arm.com>    2013-06-26 10:49:29 +0000
committer Yufeng Zhang <yufeng.zhang@arm.com>    2013-06-26 10:49:29 +0000
commit    caed712042c10c84bbf25a266988874679813689 (patch)
tree      ed7ae74e159c7dbf4a0ce45c36aabb69782d96ff /bfd/elfxx-aarch64.c
parent    a6bb11b2df1b69d79828a52a44eaf29ebc4d4955 (diff)
[AArch64, ILP32] 4/6 Split elfnn-aarch64.c to elfxx-aarch64.c
bfd/
	* Makefile.am (BFD64_BACKENDS): Add elfxx-aarch64.lo.
	(BFD64_BACKENDS_CFILES): Add elfxx-aarch64.c.
	* Makefile.in: Re-generated.
	* configure.in (bfd_elf64_bigaarch64_vec): Add elfxx-aarch64.lo.
	(bfd_elf64_littleaarch64_vec): Likewise.
	(bfd_elf32_bigaarch64_vec): Likewise.
	(bfd_elf32_littleaarch64_vec): Likewise.
	* configure: Re-generated.
	* elfxx-aarch64.c: New file; split from elf64-aarch64.c.
	* elfxx-aarch64.h: New file.
	* elfnn-aarch64.c: Include "elfxx-aarch64.h"; move the following
	stuff to elfxx-aarch64.c.
	(bfd_elf_aarch64_put_addend): Removed.
	(PG_OFFSET, PG): Likewise.
	(elfNN_aarch64_small_plt0_entry): Support ELF32.
	(elfNN_aarch64_tlsdesc_small_plt_entry): Likewise.
	(elf64_aarch64_grok_prstatus): Removed.
	(elf_backend_grok_prstatus): Removed.
	(insn32): Likewise.
	(aarch64_unsigned_overflow): Likewise.
	(aarch64_signed_overflow): Likewise.
	(aarch64_resolve_relocation): Likewise.
	(MASK): Likewise.
	(decode_branch_ofs_26): Likewise.
	(decode_cond_branch_ofs_19): Likewise.
	(decode_ld_lit_ofs_19): Likewise.
	(decode_tst_branch_ofs_14): Likewise.
	(decode_movw_imm): Likewise.
	(decode_adr_imm): Likewise.
	(decode_add_imm): Likewise.
	(reencode_branch_ofs_26): Likewise.
	(reencode_cond_branch_ofs_19): Likewise.
	(reencode_ld_lit_ofs_19): Likewise.
	(reencode_tst_branch_ofs_14): Likewise.
	(reencode_movw_imm): Likewise.
	(reencode_adr_imm): Likewise.
	(reencode_ldst_pos_imm): Likewise.
	(reencode_add_imm): Likewise.
	(reencode_movzn_to_movz): Likewise.
	(reencode_movzn_to_movn): Likewise.
	(aarch64_relocate): Update to call the new function names in
	elfxx-aarch64.c.
	(aarch64_calculate_got_entry_vma): Likewise.
	(elfNN_aarch64_final_link_relocate): Likewise.
	(elf64_aarch64_update_plt_entry): Likewise; change the type of the
	parameter 'r_type' to bfd_reloc_code_real_type; rename to ...
	(elf_aarch64_update_plt_entry): ... this.
	(elfNN_aarch64_create_small_pltn_entry): Update.
	(elfNN_aarch64_init_small_plt0_entry): Remove plt_got_base; add
	bfd_vma plt_got_2nd_ent; update to call elf_aarch64_update_plt_entry.
	(elfNN_aarch64_finish_dynamic_sections): Add plt_entry; update to
	call elf_aarch64_update_plt_entry.
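The helpers collected in the new file all follow the same bit-field idiom: clear an instruction's immediate field with MASK, then OR in the (masked) new value. Below is a minimal standalone sketch of that idiom, mirroring the MOVW re-encode in the diff; the instruction word 0xd2800000 (movz x0, #0) and the test immediate are illustrative only, not taken from the patch.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Same bit-field idiom as the helpers in the new file: clear the
   immediate field, then OR in the new (masked) value.  */
#define MASK(n) ((1u << (n)) - 1)

static inline uint32_t
reencode_movw_imm (uint32_t insn, uint32_t imm)
{
  /* MOVZ/MOVN/MOVK hold their 16-bit immediate in bits [20:5].  */
  return (insn & ~(MASK (16) << 5)) | ((imm & MASK (16)) << 5);
}

int
main (void)
{
  /* 0xd2800000 is "movz x0, #0"; patch its immediate to 0x1234.  */
  uint32_t insn = 0xd2800000u;
  uint32_t patched = reencode_movw_imm (insn, 0x1234);

  assert (((patched >> 5) & MASK (16)) == 0x1234);
  printf ("patched insn: 0x%08x\n", (unsigned int) patched);
  return 0;
}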
Diffstat (limited to 'bfd/elfxx-aarch64.c')
-rw-r--r--  bfd/elfxx-aarch64.c  522
1 file changed, 522 insertions, 0 deletions
diff --git a/bfd/elfxx-aarch64.c b/bfd/elfxx-aarch64.c
new file mode 100644
index 0000000..395c999
--- /dev/null
+++ b/bfd/elfxx-aarch64.c
@@ -0,0 +1,522 @@
+/* AArch64-specific support for ELF.
+ Copyright 2009-2013 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of BFD, the Binary File Descriptor library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING3. If not,
+ see <http://www.gnu.org/licenses/>. */
+
+#include "sysdep.h"
+#include "elfxx-aarch64.h"
+
+#define MASK(n) ((1u << (n)) - 1)
+
+/* Decode the 26-bit offset of unconditional branch. */
+static inline uint32_t
+decode_branch_ofs_26 (uint32_t insn)
+{
+ return insn & MASK (26);
+}
+
+/* Decode the 19-bit offset of conditional branch and compare & branch. */
+static inline uint32_t
+decode_cond_branch_ofs_19 (uint32_t insn)
+{
+ return (insn >> 5) & MASK (19);
+}
+
+/* Decode the 19-bit offset of load literal. */
+static inline uint32_t
+decode_ld_lit_ofs_19 (uint32_t insn)
+{
+ return (insn >> 5) & MASK (19);
+}
+
+/* Decode the 14-bit offset of test & branch. */
+static inline uint32_t
+decode_tst_branch_ofs_14 (uint32_t insn)
+{
+ return (insn >> 5) & MASK (14);
+}
+
+/* Decode the 16-bit imm of move wide. */
+static inline uint32_t
+decode_movw_imm (uint32_t insn)
+{
+ return (insn >> 5) & MASK (16);
+}
+
+/* Decode the 12-bit imm of add immediate. */
+static inline uint32_t
+decode_add_imm (uint32_t insn)
+{
+ return (insn >> 10) & MASK (12);
+}
+
+/* Reencode the imm field of add immediate. */
+static inline uint32_t
+reencode_add_imm (uint32_t insn, uint32_t imm)
+{
+ return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
+}
+
+/* Reencode the imm field of adr. */
+static inline uint32_t
+reencode_adr_imm (uint32_t insn, uint32_t imm)
+{
+ return (insn & ~((MASK (2) << 29) | (MASK (19) << 5)))
+ | ((imm & MASK (2)) << 29) | ((imm & (MASK (19) << 2)) << 3);
+}
+
+/* Reencode the imm field of ld/st pos immediate. */
+static inline uint32_t
+reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
+{
+ return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
+}
+
+/* Encode the 26-bit offset of unconditional branch. */
+static inline uint32_t
+reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
+{
+ return (insn & ~MASK (26)) | (ofs & MASK (26));
+}
+
+/* Encode the 19-bit offset of conditional branch and compare & branch. */
+static inline uint32_t
+reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
+{
+ return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
+}
+
+/* Encode the 19-bit offset of load literal. */
+static inline uint32_t
+reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
+{
+ return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
+}
+
+/* Encode the 14-bit offset of test & branch. */
+static inline uint32_t
+reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
+{
+ return (insn & ~(MASK (14) << 5)) | ((ofs & MASK (14)) << 5);
+}
+
+/* Reencode the imm field of move wide. */
+static inline uint32_t
+reencode_movw_imm (uint32_t insn, uint32_t imm)
+{
+ return (insn & ~(MASK (16) << 5)) | ((imm & MASK (16)) << 5);
+}
+
+/* Reencode mov[zn] to movz. */
+static inline uint32_t
+reencode_movzn_to_movz (uint32_t opcode)
+{
+ return opcode | (1 << 30);
+}
+
+/* Reencode mov[zn] to movn. */
+static inline uint32_t
+reencode_movzn_to_movn (uint32_t opcode)
+{
+ return opcode & ~(1 << 30);
+}
+
+/* Return non-zero if the indicated VALUE has overflowed the maximum
+ range expressible by an unsigned number with the indicated number of
+ BITS. */
+
+static bfd_reloc_status_type
+aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
+{
+ bfd_vma lim;
+ if (bits >= sizeof (bfd_vma) * 8)
+ return bfd_reloc_ok;
+ lim = (bfd_vma) 1 << bits;
+ if (value >= lim)
+ return bfd_reloc_overflow;
+ return bfd_reloc_ok;
+}
+
+/* Return non-zero if the indicated VALUE has overflowed the maximum
+ range expressible by a signed number with the indicated number of
+ BITS. */
+
+static bfd_reloc_status_type
+aarch64_signed_overflow (bfd_vma value, unsigned int bits)
+{
+ bfd_signed_vma svalue = (bfd_signed_vma) value;
+ bfd_signed_vma lim;
+
+ if (bits >= sizeof (bfd_vma) * 8)
+ return bfd_reloc_ok;
+ lim = (bfd_signed_vma) 1 << (bits - 1);
+ if (svalue < -lim || svalue >= lim)
+ return bfd_reloc_overflow;
+ return bfd_reloc_ok;
+}
+
+/* Insert the addend/value into the instruction or data object being
+ relocated. */
+bfd_reloc_status_type
+_bfd_aarch64_elf_put_addend (bfd *abfd,
+ bfd_byte *address, bfd_reloc_code_real_type r_type,
+ reloc_howto_type *howto, bfd_signed_vma addend)
+{
+ bfd_reloc_status_type status = bfd_reloc_ok;
+ bfd_signed_vma old_addend = addend;
+ bfd_vma contents;
+ int size;
+
+ size = bfd_get_reloc_size (howto);
+ switch (size)
+ {
+ case 2:
+ contents = bfd_get_16 (abfd, address);
+ break;
+ case 4:
+ if (howto->src_mask != 0xffffffff)
+ /* Must be 32-bit instruction, always little-endian. */
+ contents = bfd_getl32 (address);
+ else
+ /* Must be 32-bit data (endianness dependent). */
+ contents = bfd_get_32 (abfd, address);
+ break;
+ case 8:
+ contents = bfd_get_64 (abfd, address);
+ break;
+ default:
+ abort ();
+ }
+
+ switch (howto->complain_on_overflow)
+ {
+ case complain_overflow_dont:
+ break;
+ case complain_overflow_signed:
+ status = aarch64_signed_overflow (addend,
+ howto->bitsize + howto->rightshift);
+ break;
+ case complain_overflow_unsigned:
+ status = aarch64_unsigned_overflow (addend,
+ howto->bitsize + howto->rightshift);
+ break;
+ case complain_overflow_bitfield:
+ default:
+ abort ();
+ }
+
+ addend >>= howto->rightshift;
+
+ switch (r_type)
+ {
+ case BFD_RELOC_AARCH64_JUMP26:
+ case BFD_RELOC_AARCH64_CALL26:
+ contents = reencode_branch_ofs_26 (contents, addend);
+ break;
+
+ case BFD_RELOC_AARCH64_BRANCH19:
+ contents = reencode_cond_branch_ofs_19 (contents, addend);
+ break;
+
+ case BFD_RELOC_AARCH64_TSTBR14:
+ contents = reencode_tst_branch_ofs_14 (contents, addend);
+ break;
+
+ case BFD_RELOC_AARCH64_LD_LO19_PCREL:
+ case BFD_RELOC_AARCH64_GOT_LD_PREL19:
+ if (old_addend & ((1 << howto->rightshift) - 1))
+ return bfd_reloc_overflow;
+ contents = reencode_ld_lit_ofs_19 (contents, addend);
+ break;
+
+ case BFD_RELOC_AARCH64_TLSDESC_CALL:
+ break;
+
+ case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
+ case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
+ case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
+ case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
+ case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
+ contents = reencode_adr_imm (contents, addend);
+ break;
+
+ case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_ADD_LO12:
+ /* Corresponds to: add rd, rn, #uimm12 to provide the low order
+ 12 bits of the page offset following
+ BFD_RELOC_AARCH64_ADR_HI21_PCREL which computes the
+ (pc-relative) page base. */
+ contents = reencode_add_imm (contents, addend);
+ break;
+
+ case BFD_RELOC_AARCH64_LDST8_LO12:
+ case BFD_RELOC_AARCH64_LDST16_LO12:
+ case BFD_RELOC_AARCH64_LDST32_LO12:
+ case BFD_RELOC_AARCH64_LDST64_LO12:
+ case BFD_RELOC_AARCH64_LDST128_LO12:
+ case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
+ if (old_addend & ((1 << howto->rightshift) - 1))
+ return bfd_reloc_overflow;
+ /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
+ 12 bits of the page offset following BFD_RELOC_AARCH64_ADR_HI21_PCREL
+ which computes the (pc-relative) page base. */
+ contents = reencode_ldst_pos_imm (contents, addend);
+ break;
+
+ /* Group relocations to create high bits of a 16, 32, 48 or 64
+ bit signed data or abs address inline. Will change
+ instruction to MOVN or MOVZ depending on sign of calculated
+ value. */
+
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
+ case BFD_RELOC_AARCH64_MOVW_G0_S:
+ case BFD_RELOC_AARCH64_MOVW_G1_S:
+ case BFD_RELOC_AARCH64_MOVW_G2_S:
+ /* NOTE: We can only come here with movz or movn. */
+ if (addend < 0)
+ {
+ /* Force use of MOVN. */
+ addend = ~addend;
+ contents = reencode_movzn_to_movn (contents);
+ }
+ else
+ {
+ /* Force use of MOVZ. */
+ contents = reencode_movzn_to_movz (contents);
+ }
+ /* fall through */
+
+ /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
+ data or abs address inline. */
+
+ case BFD_RELOC_AARCH64_MOVW_G0:
+ case BFD_RELOC_AARCH64_MOVW_G0_NC:
+ case BFD_RELOC_AARCH64_MOVW_G1:
+ case BFD_RELOC_AARCH64_MOVW_G1_NC:
+ case BFD_RELOC_AARCH64_MOVW_G2:
+ case BFD_RELOC_AARCH64_MOVW_G2_NC:
+ case BFD_RELOC_AARCH64_MOVW_G3:
+ contents = reencode_movw_imm (contents, addend);
+ break;
+
+ default:
+ /* Repack simple data */
+ if (howto->dst_mask & (howto->dst_mask + 1))
+ return bfd_reloc_notsupported;
+
+ contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
+ break;
+ }
+
+ switch (size)
+ {
+ case 2:
+ bfd_put_16 (abfd, contents, address);
+ break;
+ case 4:
+ if (howto->dst_mask != 0xffffffff)
+ /* must be 32-bit instruction, always little-endian */
+ bfd_putl32 (contents, address);
+ else
+ /* must be 32-bit data (endianness dependent) */
+ bfd_put_32 (abfd, contents, address);
+ break;
+ case 8:
+ bfd_put_64 (abfd, contents, address);
+ break;
+ default:
+ abort ();
+ }
+
+ return status;
+}
+
+bfd_vma
+_bfd_aarch64_elf_resolve_relocation (bfd_reloc_code_real_type r_type,
+ bfd_vma place, bfd_vma value,
+ bfd_vma addend, bfd_boolean weak_undef_p)
+{
+ switch (r_type)
+ {
+ case BFD_RELOC_AARCH64_TLSDESC_CALL:
+ case BFD_RELOC_AARCH64_NONE:
+ break;
+
+ case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
+ case BFD_RELOC_AARCH64_BRANCH19:
+ case BFD_RELOC_AARCH64_LD_LO19_PCREL:
+ case BFD_RELOC_AARCH64_16_PCREL:
+ case BFD_RELOC_AARCH64_32_PCREL:
+ case BFD_RELOC_AARCH64_64_PCREL:
+ case BFD_RELOC_AARCH64_TSTBR14:
+ if (weak_undef_p)
+ value = place;
+ value = value + addend - place;
+ break;
+
+ case BFD_RELOC_AARCH64_CALL26:
+ case BFD_RELOC_AARCH64_JUMP26:
+ value = value + addend - place;
+ break;
+
+ case BFD_RELOC_AARCH64_16:
+ case BFD_RELOC_AARCH64_32:
+ case BFD_RELOC_AARCH64_MOVW_G0_S:
+ case BFD_RELOC_AARCH64_MOVW_G1_S:
+ case BFD_RELOC_AARCH64_MOVW_G2_S:
+ case BFD_RELOC_AARCH64_MOVW_G0:
+ case BFD_RELOC_AARCH64_MOVW_G0_NC:
+ case BFD_RELOC_AARCH64_MOVW_G1:
+ case BFD_RELOC_AARCH64_MOVW_G1_NC:
+ case BFD_RELOC_AARCH64_MOVW_G2:
+ case BFD_RELOC_AARCH64_MOVW_G2_NC:
+ case BFD_RELOC_AARCH64_MOVW_G3:
+ value = value + addend;
+ break;
+
+ case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
+ case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
+ if (weak_undef_p)
+ value = PG (place);
+ value = PG (value + addend) - PG (place);
+ break;
+
+ case BFD_RELOC_AARCH64_GOT_LD_PREL19:
+ value = value + addend - place;
+ break;
+
+ case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
+ case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
+ case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
+ value = PG (value + addend) - PG (place);
+ break;
+
+ case BFD_RELOC_AARCH64_ADD_LO12:
+ case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
+ case BFD_RELOC_AARCH64_LDST8_LO12:
+ case BFD_RELOC_AARCH64_LDST16_LO12:
+ case BFD_RELOC_AARCH64_LDST32_LO12:
+ case BFD_RELOC_AARCH64_LDST64_LO12:
+ case BFD_RELOC_AARCH64_LDST128_LO12:
+ case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_ADD:
+ case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSDESC_LDR:
+ case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+ value = PG_OFFSET (value + addend);
+ break;
+
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
+ value = (value + addend) & (bfd_vma) 0xffff0000;
+ break;
+ case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
+ value = (value + addend) & (bfd_vma) 0xfff000;
+ break;
+
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
+ value = (value + addend) & (bfd_vma) 0xffff;
+ break;
+
+ case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
+ value = (value + addend) & ~(bfd_vma) 0xffffffff;
+ value -= place & ~(bfd_vma) 0xffffffff;
+ break;
+
+ default:
+ break;
+ }
+
+ return value;
+}
+
+/* Hook called by the linker routine which adds symbols from an object
+ file. */
+
+bfd_boolean
+_bfd_aarch64_elf_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
+ Elf_Internal_Sym *sym,
+ const char **namep ATTRIBUTE_UNUSED,
+ flagword *flagsp ATTRIBUTE_UNUSED,
+ asection **secp ATTRIBUTE_UNUSED,
+ bfd_vma *valp ATTRIBUTE_UNUSED)
+{
+ if ((abfd->flags & DYNAMIC) == 0
+ && (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
+ || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE))
+ elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
+
+ return TRUE;
+}
+
+/* Support for core dump NOTE sections. */
+
+bfd_boolean
+_bfd_aarch64_elf_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
+{
+ int offset;
+ size_t size;
+
+ switch (note->descsz)
+ {
+ default:
+ return FALSE;
+
+ case 408: /* sizeof(struct elf_prstatus) on Linux/arm64. */
+ /* pr_cursig */
+ elf_tdata (abfd)->core->signal
+ = bfd_get_16 (abfd, note->descdata + 12);
+
+ /* pr_pid */
+ elf_tdata (abfd)->core->lwpid
+ = bfd_get_32 (abfd, note->descdata + 32);
+
+ /* pr_reg */
+ offset = 112;
+ size = 272;
+
+ break;
+ }
+
+ /* Make a ".reg/999" section. */
+ return _bfd_elfcore_make_pseudosection (abfd, ".reg",
+ size, note->descpos + offset);
+}