author | Pedro Alves <palves@redhat.com> | 2010-05-26 18:19:28 +0000 |
---|---|---|
committer | Pedro Alves <palves@redhat.com> | 2010-05-26 18:19:28 +0000 |
commit | dde08ee10924b57d4e1c80e40a6a1fe14c93dcb5 (patch) | |
tree | 1d24a9d5f315737edcb115c43f6ae024437126ba /gdb/amd64-tdep.c | |
parent | 0a5b531f15567f9b4d5b5ac6b6e89ee5e8e0e7f0 (diff) | |
gdb/
2010-05-26 Pedro Alves <pedro@codesourcery.com>
* NEWS: Mention the `qRelocInsn' feature.
* gdbarch.sh (relocate_instruction): New.
* amd64-tdep.c (rip_relative_offset): New.
(append_insns): New.
(amd64_relocate_instruction): New.
(amd64_init_abi): Install it.
* i386-tdep.c (append_insns): New.
(i386_relocate_instruction): New.
(i386_gdbarch_init): Install it.
* remote.c (remote_get_noisy_reply): Handle qRelocInsn requests.
* gdbarch.h, gdbarch.c: Regenerate.
gdb/doc/
2010-05-26 Pedro Alves <pedro@codesourcery.com>
* gdb.texinfo (General Query Packets) <qSupported>: Describe the
`qRelocInsn' feature.
(Relocate instruction reply packet): New subsection
of `Tracepoint Packets'.
(Tracepoint Packets): Mention that packets QTDP and QTStart
support the qRelocInsn request, and add cross reference to new
subsection.
Diffstat (limited to 'gdb/amd64-tdep.c')
-rw-r--r-- | gdb/amd64-tdep.c | 119 |
1 file changed, 119 insertions, 0 deletions
diff --git a/gdb/amd64-tdep.c b/gdb/amd64-tdep.c
index 6336b7a..9feed90 100644
--- a/gdb/amd64-tdep.c
+++ b/gdb/amd64-tdep.c
@@ -1507,6 +1507,123 @@ amd64_displaced_step_fixup (struct gdbarch *gdbarch,
 			    paddress (gdbarch, retaddr));
     }
 }
+
+/* If the instruction INSN uses RIP-relative addressing, return the
+   offset into the raw INSN where the displacement to be adjusted is
+   found.  Returns 0 if the instruction doesn't use RIP-relative
+   addressing.  */
+
+static int
+rip_relative_offset (struct amd64_insn *insn)
+{
+  if (insn->modrm_offset != -1)
+    {
+      gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
+
+      if ((modrm & 0xc7) == 0x05)
+	{
+	  /* The displacement is found right after the ModRM byte.  */
+	  return insn->modrm_offset + 1;
+	}
+    }
+
+  return 0;
+}
+
+static void
+append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
+{
+  target_write_memory (*to, buf, len);
+  *to += len;
+}
+
+void
+amd64_relocate_instruction (struct gdbarch *gdbarch,
+			    CORE_ADDR *to, CORE_ADDR oldloc)
+{
+  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+  int len = gdbarch_max_insn_length (gdbarch);
+  /* Extra space for sentinels.  */
+  int fixup_sentinel_space = len;
+  gdb_byte *buf = xmalloc (len + fixup_sentinel_space);
+  struct amd64_insn insn_details;
+  int offset = 0;
+  LONGEST rel32, newrel;
+  gdb_byte *insn;
+  int insn_length;
+
+  read_memory (oldloc, buf, len);
+
+  /* Set up the sentinel space so we don't have to worry about running
+     off the end of the buffer.  An excessive number of leading prefixes
+     could otherwise cause this.  */
+  memset (buf + len, 0, fixup_sentinel_space);
+
+  insn = buf;
+  amd64_get_insn_details (insn, &insn_details);
+
+  insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
+
+  /* Skip legacy instruction prefixes.  */
+  insn = amd64_skip_prefixes (insn);
+
+  /* Adjust calls with 32-bit relative addresses as push/jump, with
+     the address pushed being the location where the original call in
+     the user program would return to.  */
+  if (insn[0] == 0xe8)
+    {
+      gdb_byte push_buf[16];
+      unsigned int ret_addr;
+
+      /* Where "ret" in the original code will return to.  */
+      ret_addr = oldloc + insn_length;
+      push_buf[0] = 0x68; /* pushq $...  */
+      memcpy (&push_buf[1], &ret_addr, 4);
+      /* Push the push.  */
+      append_insns (to, 5, push_buf);
+
+      /* Convert the relative call to a relative jump.  */
+      insn[0] = 0xe9;
+
+      /* Adjust the destination offset.  */
+      rel32 = extract_signed_integer (insn + 1, 4, byte_order);
+      newrel = (oldloc - *to) + rel32;
+      store_signed_integer (insn + 1, 4, newrel, byte_order);
+
+      /* Write the adjusted jump into its displaced location.  */
+      append_insns (to, 5, insn);
+      return;
+    }
+
+  offset = rip_relative_offset (&insn_details);
+  if (!offset)
+    {
+      /* Adjust jumps with 32-bit relative addresses.  Calls are
+	 already handled above.  */
+      if (insn[0] == 0xe9)
+	offset = 1;
+      /* Adjust conditional jumps.  */
+      else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
+	offset = 2;
+    }
+
+  if (offset)
+    {
+      rel32 = extract_signed_integer (insn + offset, 4, byte_order);
+      newrel = (oldloc - *to) + rel32;
+      store_signed_integer (insn + offset, 4, newrel, byte_order);
+      if (debug_displaced)
+	fprintf_unfiltered (gdb_stdlog,
+			    "Adjusted insn rel32=0x%s at 0x%s to"
+			    " rel32=0x%s at 0x%s\n",
+			    hex_string (rel32), paddress (gdbarch, oldloc),
+			    hex_string (newrel), paddress (gdbarch, *to));
+    }
+
+  /* Write the adjusted instruction into its displaced location.  */
+  append_insns (to, insn_length, buf);
+}
+
 /* The maximum number of saved registers.  This should include %rip.  */
 #define AMD64_NUM_SAVED_REGS	AMD64_NUM_GREGS
@@ -2363,6 +2480,8 @@ amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
 					amd64_regset_from_core_section);
 
   set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
+
+  set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
 }
 
 /* Provide a prototype to silence -Wmissing-prototypes.  */
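
The core of the patch is the displacement re-basing shared by the RIP-relative and jump/call paths: a 32-bit PC-relative operand encodes `target - next_insn_address`, so when the instruction is copied from `oldloc` to `*to` the target stays put if the difference `oldloc - *to` is folded back into the displacement (the `newrel = (oldloc - *to) + rel32` lines above). The standalone sketch below illustrates that arithmetic and the ModRM test `(modrm & 0xc7) == 0x05` used by `rip_relative_offset`. It is not GDB code; the helper names, example addresses, and the 5-byte `jmp rel32` instruction length are illustrative assumptions.

```c
/* Minimal sketch (not GDB code) of the two ideas the patch relies on:
   detecting RIP-relative addressing from the ModRM byte, and re-basing
   a 32-bit PC-relative displacement when an instruction is copied from
   FROM to TO.  Helper names and addresses are hypothetical.  */
#include <stdint.h>
#include <stdio.h>

/* In 64-bit mode, mod == 00 and r/m == 101 (modrm & 0xc7 == 0x05)
   selects RIP-relative addressing; a disp32 follows the ModRM byte.  */
static int
is_rip_relative (uint8_t modrm)
{
  return (modrm & 0xc7) == 0x05;
}

/* A PC-relative operand encodes TARGET - NEXT_INSN_ADDR.  If the
   instruction is copied unchanged (same length) from FROM to TO, adding
   FROM - TO back into the displacement keeps the target unchanged.
   The result must still fit in 32 bits for the encoding to be valid.  */
static int32_t
rebase_disp32 (int32_t disp, uint64_t from, uint64_t to)
{
  int64_t delta = (int64_t) from - (int64_t) to;
  return (int32_t) (disp + delta);
}

int
main (void)
{
  /* Example: a 5-byte jmp at 0x400000 with disp32 = 0x100 targets
     0x400000 + 5 + 0x100 = 0x400105.  Copied to 0x600000, the adjusted
     displacement must still reach 0x400105.  */
  uint64_t from = 0x400000, to = 0x600000;
  int32_t disp = 0x100;
  int32_t newdisp = rebase_disp32 (disp, from, to);

  printf ("ModRM 0x05 is RIP-relative: %d\n", is_rip_relative (0x05));
  printf ("old target: 0x%llx\n", (unsigned long long) (from + 5 + disp));
  printf ("new target: 0x%llx\n", (unsigned long long) (to + 5 + newdisp));
  return 0;
}
```

Relative calls get the extra push/jump treatment in `amd64_relocate_instruction` because simply re-basing the call's displacement would make the callee return into the displaced copy rather than into the original instruction stream; pushing `oldloc + insn_length` first preserves the return address the user program expects, as the patch's own comment notes.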