author    Yao Qi <yao.qi@linaro.org>  2015-10-12 11:28:38 +0100
committer Yao Qi <yao.qi@linaro.org>  2015-10-12 11:28:38 +0100
commit    b6542f81d0894d69e7f12a73b94bf4adead75a5c (patch)
tree      931e149e5215f28a2536c30dff6d66016de34beb /gdb/arch
parent    246994ce350923199a4c952b38dcee5bcbe8c554 (diff)
Support displaced stepping in aarch64-linux
This patch adds support for displaced stepping in aarch64-linux.  An
instruction visitor is implemented for displaced stepping and used to
record the information needed to fix up the PC after the displaced
step, when necessary.  Some emit_* functions are converted to macros
and moved to arch/aarch64-insn.{c,h} so that they can be shared.

gdb:

2015-10-12  Yao Qi  <yao.qi@linaro.org>

        * aarch64-linux-tdep.c: Include arch-utils.h.
        (aarch64_linux_init_abi): Call set_gdbarch_max_insn_length,
        set_gdbarch_displaced_step_copy_insn,
        set_gdbarch_displaced_step_fixup,
        set_gdbarch_displaced_step_free_closure,
        set_gdbarch_displaced_step_location, and
        set_gdbarch_displaced_step_hw_singlestep.
        * aarch64-tdep.c (struct displaced_step_closure): New.
        (struct aarch64_displaced_step_data): New.
        (aarch64_displaced_step_b): New function.
        (aarch64_displaced_step_b_cond): Likewise.
        (aarch64_register): Likewise.
        (aarch64_displaced_step_cb): Likewise.
        (aarch64_displaced_step_tb): Likewise.
        (aarch64_displaced_step_adr): Likewise.
        (aarch64_displaced_step_ldr_literal): Likewise.
        (aarch64_displaced_step_others): Likewise.
        (aarch64_displaced_step_copy_insn): Likewise.
        (aarch64_displaced_step_fixup): Likewise.
        (aarch64_displaced_step_hw_singlestep): Likewise.
        * aarch64-tdep.h (DISPLACED_MODIFIED_INSNS): New macro.
        (aarch64_displaced_step_copy_insn): Declare.
        (aarch64_displaced_step_fixup): Declare.
        (aarch64_displaced_step_hw_singlestep): Declare.
        * arch/aarch64-insn.c (emit_insn): Moved from
        gdbserver/linux-aarch64-low.c.
        (emit_load_store): Likewise.
        * arch/aarch64-insn.h (enum aarch64_opcodes): Moved from
        gdbserver/linux-aarch64-low.c.
        (struct aarch64_register): Likewise.
        (struct aarch64_memory_operand): Likewise.
        (ENCODE): Likewise.
        (can_encode_int32): New macro.
        (emit_b, emit_bcond, emit_cb, emit_ldr, emit_ldrsw): Likewise.
        (emit_tb, emit_nop): Likewise.
        (emit_insn): Declare.
        (emit_load_store): Declare.

gdb/gdbserver:

2015-10-12  Yao Qi  <yao.qi@linaro.org>

        * linux-aarch64-low.c (enum aarch64_opcodes): Move to
        arch/aarch64-insn.h.
        (struct aarch64_memory_operand): Likewise.
        (ENCODE): Likewise.
        (emit_insn): Move to arch/aarch64-insn.c.
        (emit_b, emit_bcond, emit_cb, emit_tb): Remove.
        (emit_load_store): Move to arch/aarch64-insn.c.
        (emit_ldr, emit_ldrb, emit_ldrsw, emit_nop): Remove.
        (can_encode_int32): Remove.
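For context only, here is a minimal sketch (not part of the patch) of how the
gdbarch hooks named in the ChangeLog are typically registered from
aarch64_linux_init_abi.  The aarch64_* callbacks are the new functions listed
above; the max_insn_length value and the free_closure/location callbacks shown
are assumptions based on common GDB practice (arch-utils.h, which the
ChangeLog adds, provides simple_displaced_step_free_closure).

  /* Sketch: wiring up the displaced-stepping hooks listed in the ChangeLog.
     A displaced step copies the instruction to a scratch area, single-steps
     it there, then fixes up the PC and registers.  */
  static void
  aarch64_linux_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
  {
    /* ... existing AArch64 GNU/Linux ABI setup ... */

    /* Assumption: scratch area sized for the modified instruction copies.  */
    set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
    set_gdbarch_displaced_step_copy_insn (gdbarch,
                                          aarch64_displaced_step_copy_insn);
    set_gdbarch_displaced_step_fixup (gdbarch, aarch64_displaced_step_fixup);
    set_gdbarch_displaced_step_free_closure (gdbarch,
                                             simple_displaced_step_free_closure);
    set_gdbarch_displaced_step_location (gdbarch,
                                         linux_displaced_step_location);
    set_gdbarch_displaced_step_hw_singlestep (gdbarch,
                                              aarch64_displaced_step_hw_singlestep);
  }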
Diffstat (limited to 'gdb/arch')
-rw-r--r--  gdb/arch/aarch64-insn.c   58
-rw-r--r--  gdb/arch/aarch64-insn.h  232
2 files changed, 290 insertions, 0 deletions
diff --git a/gdb/arch/aarch64-insn.c b/gdb/arch/aarch64-insn.c
index d0e88fa..3bc0117 100644
--- a/gdb/arch/aarch64-insn.c
+++ b/gdb/arch/aarch64-insn.c
@@ -328,3 +328,61 @@ aarch64_relocate_instruction (uint32_t insn,
else
visitor->others (insn, data);
}
+
+/* Write a 32-bit unsigned integer INSN into *BUF.  Return the number of
+ instructions written (always 1). */
+
+int
+emit_insn (uint32_t *buf, uint32_t insn)
+{
+ *buf = insn;
+ return 1;
+}
+
+/* Helper function emitting a load or store instruction. */
+
+int
+emit_load_store (uint32_t *buf, uint32_t size,
+ enum aarch64_opcodes opcode,
+ struct aarch64_register rt,
+ struct aarch64_register rn,
+ struct aarch64_memory_operand operand)
+{
+ uint32_t op;
+
+ switch (operand.type)
+ {
+ case MEMORY_OPERAND_OFFSET:
+ {
+ op = ENCODE (1, 1, 24);
+
+ return emit_insn (buf, opcode | ENCODE (size, 2, 30) | op
+ | ENCODE (operand.index >> 3, 12, 10)
+ | ENCODE (rn.num, 5, 5)
+ | ENCODE (rt.num, 5, 0));
+ }
+ case MEMORY_OPERAND_POSTINDEX:
+ {
+ uint32_t post_index = ENCODE (1, 2, 10);
+
+ op = ENCODE (0, 1, 24);
+
+ return emit_insn (buf, opcode | ENCODE (size, 2, 30) | op
+ | post_index | ENCODE (operand.index, 9, 12)
+ | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
+ }
+ case MEMORY_OPERAND_PREINDEX:
+ {
+ uint32_t pre_index = ENCODE (3, 2, 10);
+
+ op = ENCODE (0, 1, 24);
+
+ return emit_insn (buf, opcode | ENCODE (size, 2, 30) | op
+ | pre_index | ENCODE (operand.index, 9, 12)
+ | ENCODE (rn.num, 5, 5)
+ | ENCODE (rt.num, 5, 0));
+ }
+ default:
+ return 0;
+ }
+}
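As a rough usage sketch (not part of the patch), the helper above can be
driven directly with the types declared in arch/aarch64-insn.h below; the
register and operand initializers here are illustrative assumptions.

  /* Sketch: emit "ldr x0, [x1, #16]" into a one-word scratch buffer.  */
  uint32_t buf[1];
  struct aarch64_register rt = { 0, 1 };  /* x0: num = 0, is64 = 1.  */
  struct aarch64_register rn = { 1, 1 };  /* x1: the base register.  */
  struct aarch64_memory_operand operand;

  operand.type = MEMORY_OPERAND_OFFSET;
  operand.index = 16;  /* Byte offset; encoded as index >> 3, so it must be
                          a multiple of 8 here.  */

  /* size == 3 selects the 64-bit LDR variant.  The return value is the
     number of instructions written (1), or 0 for an unknown operand type.  */
  emit_load_store (buf, 3, LDR, rt, rn, operand);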
diff --git a/gdb/arch/aarch64-insn.h b/gdb/arch/aarch64-insn.h
index 47f6715..01a5d73 100644
--- a/gdb/arch/aarch64-insn.h
+++ b/gdb/arch/aarch64-insn.h
@@ -21,6 +21,129 @@
extern int aarch64_debug;
+/* List of opcodes that we need for building the jump pad and relocating
+ an instruction. */
+
+enum aarch64_opcodes
+{
+ /* B 0001 01ii iiii iiii iiii iiii iiii iiii */
+ /* BL 1001 01ii iiii iiii iiii iiii iiii iiii */
+ /* B.COND 0101 0100 iiii iiii iiii iiii iii0 cccc */
+ /* CBZ s011 0100 iiii iiii iiii iiii iiir rrrr */
+ /* CBNZ s011 0101 iiii iiii iiii iiii iiir rrrr */
+ /* TBZ b011 0110 bbbb biii iiii iiii iiir rrrr */
+ /* TBNZ b011 0111 bbbb biii iiii iiii iiir rrrr */
+ B = 0x14000000,
+ BL = 0x80000000 | B,
+ BCOND = 0x40000000 | B,
+ CBZ = 0x20000000 | B,
+ CBNZ = 0x21000000 | B,
+ TBZ = 0x36000000 | B,
+ TBNZ = 0x37000000 | B,
+ /* BLR 1101 0110 0011 1111 0000 00rr rrr0 0000 */
+ BLR = 0xd63f0000,
+ /* RET 1101 0110 0101 1111 0000 00rr rrr0 0000 */
+ RET = 0xd65f0000,
+ /* STP s010 100o o0ii iiii irrr rrrr rrrr rrrr */
+ /* LDP s010 100o o1ii iiii irrr rrrr rrrr rrrr */
+ /* STP (SIMD&VFP) ss10 110o o0ii iiii irrr rrrr rrrr rrrr */
+ /* LDP (SIMD&VFP) ss10 110o o1ii iiii irrr rrrr rrrr rrrr */
+ STP = 0x28000000,
+ LDP = 0x28400000,
+ STP_SIMD_VFP = 0x04000000 | STP,
+ LDP_SIMD_VFP = 0x04000000 | LDP,
+ /* STR ss11 100o 00xi iiii iiii xxrr rrrr rrrr */
+ /* LDR ss11 100o 01xi iiii iiii xxrr rrrr rrrr */
+ /* LDRSW 1011 100o 10xi iiii iiii xxrr rrrr rrrr */
+ STR = 0x38000000,
+ LDR = 0x00400000 | STR,
+ LDRSW = 0x80800000 | STR,
+ /* LDAXR ss00 1000 0101 1111 1111 11rr rrrr rrrr */
+ LDAXR = 0x085ffc00,
+ /* STXR ss00 1000 000r rrrr 0111 11rr rrrr rrrr */
+ STXR = 0x08007c00,
+ /* STLR ss00 1000 1001 1111 1111 11rr rrrr rrrr */
+ STLR = 0x089ffc00,
+ /* MOV s101 0010 1xxi iiii iiii iiii iiir rrrr */
+ /* MOVK s111 0010 1xxi iiii iiii iiii iiir rrrr */
+ MOV = 0x52800000,
+ MOVK = 0x20000000 | MOV,
+ /* ADD s00o ooo1 xxxx xxxx xxxx xxxx xxxx xxxx */
+ /* SUB s10o ooo1 xxxx xxxx xxxx xxxx xxxx xxxx */
+ /* SUBS s11o ooo1 xxxx xxxx xxxx xxxx xxxx xxxx */
+ ADD = 0x01000000,
+ SUB = 0x40000000 | ADD,
+ SUBS = 0x20000000 | SUB,
+ /* AND s000 1010 xx0x xxxx xxxx xxxx xxxx xxxx */
+ /* ORR s010 1010 xx0x xxxx xxxx xxxx xxxx xxxx */
+ /* ORN s010 1010 xx1x xxxx xxxx xxxx xxxx xxxx */
+ /* EOR s100 1010 xx0x xxxx xxxx xxxx xxxx xxxx */
+ AND = 0x0a000000,
+ ORR = 0x20000000 | AND,
+ ORN = 0x00200000 | ORR,
+ EOR = 0x40000000 | AND,
+ /* LSLV s001 1010 110r rrrr 0010 00rr rrrr rrrr */
+ /* LSRV s001 1010 110r rrrr 0010 01rr rrrr rrrr */
+ /* ASRV s001 1010 110r rrrr 0010 10rr rrrr rrrr */
+ LSLV = 0x1ac02000,
+ LSRV = 0x00000400 | LSLV,
+ ASRV = 0x00000800 | LSLV,
+ /* SBFM s001 0011 0nii iiii iiii iirr rrrr rrrr */
+ SBFM = 0x13000000,
+ /* UBFM s101 0011 0nii iiii iiii iirr rrrr rrrr */
+ UBFM = 0x40000000 | SBFM,
+ /* CSINC s001 1010 100r rrrr cccc 01rr rrrr rrrr */
+ CSINC = 0x9a800400,
+ /* MUL s001 1011 000r rrrr 0111 11rr rrrr rrrr */
+ MUL = 0x1b007c00,
+ /* MSR (register) 1101 0101 0001 oooo oooo oooo ooor rrrr */
+ /* MRS 1101 0101 0011 oooo oooo oooo ooor rrrr */
+ MSR = 0xd5100000,
+ MRS = 0x00200000 | MSR,
+ /* HINT 1101 0101 0000 0011 0010 oooo ooo1 1111 */
+ HINT = 0xd503201f,
+ SEVL = (5 << 5) | HINT,
+ WFE = (2 << 5) | HINT,
+ NOP = (0 << 5) | HINT,
+};
+
+/* Representation of a general purpose register of the form xN or wN.
+
+ This type is used by emitting functions that take registers as operands. */
+
+struct aarch64_register
+{
+ unsigned num;
+ int is64;
+};
+
+/* Representation of a memory operand, used for load and store
+ instructions.
+
+ The types correspond to the following variants:
+
+ MEMORY_OPERAND_OFFSET: LDR rt, [rn, #offset]
+ MEMORY_OPERAND_PREINDEX: LDR rt, [rn, #index]!
+ MEMORY_OPERAND_POSTINDEX: LDR rt, [rn], #index */
+
+struct aarch64_memory_operand
+{
+ /* Type of the operand. */
+ enum
+ {
+ MEMORY_OPERAND_OFFSET,
+ MEMORY_OPERAND_PREINDEX,
+ MEMORY_OPERAND_POSTINDEX,
+ } type;
+ /* Index from the base register. */
+ int32_t index;
+};
+
+/* Helper macro to mask and shift a value into a bitfield. */
+
+#define ENCODE(val, size, offset) \
+ ((uint32_t) ((val & ((1ULL << size) - 1)) << offset))
+
int aarch64_decode_adr (CORE_ADDR addr, uint32_t insn, int *is_adrp,
unsigned *rd, int32_t *offset);
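As an aside (not part of the patch), a worked example of how ENCODE composes
the fields of one instruction from the opcode values above:

  /* ENCODE (val, size, offset) masks VAL to SIZE bits and shifts it to bit
     position OFFSET.  Building "cbz x2, #+8" by hand:  */
  uint32_t insn = CBZ
    | ENCODE (1, 1, 31)       /* sf = 1: test a 64-bit register.  */
    | ENCODE (8 >> 2, 19, 5)  /* imm19 = byte offset / 4 = 2.  */
    | ENCODE (2, 5, 0);       /* Rt = x2.  */
  /* insn == 0xb4000042, the same word the emit_cb macro below produces
     for rt = {2, 1} and offset = 8.  */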
@@ -86,4 +209,113 @@ void aarch64_relocate_instruction (uint32_t insn,
const struct aarch64_insn_visitor *visitor,
struct aarch64_insn_data *data);
+/* Return non-zero if all bits of VAL above bit BITS match its sign bit,
+ i.e. VAL can be sign-extended from BITS + 1 bits. */
+
+#define can_encode_int32(val, bits) \
+ (((val) >> (bits)) == 0 || ((val) >> (bits)) == -1)
+
+/* Write a B or BL instruction into *BUF.
+
+ B #offset
+ BL #offset
+
+ IS_BL specifies if the link register should be updated.
+ OFFSET is the immediate offset from the current PC. It is
+ byte-addressed but should be 4-byte aligned. It has a limited range of
+ +/- 128MB (26 bits << 2). */
+
+#define emit_b(buf, is_bl, offset) \
+ emit_insn (buf, ((is_bl) ? BL : B) | (ENCODE ((offset) >> 2, 26, 0)))
+
+/* Write a BCOND instruction into *BUF.
+
+ B.COND #offset
+
+ COND specifies the condition field.
+ OFFSET is the immediate offset from the current PC. It is
+ byte-addressed but should be 4-byte aligned. It has a limited range of
+ +/- 1MB (19 bits << 2). */
+
+#define emit_bcond(buf, cond, offset) \
+ emit_insn (buf, \
+ BCOND | ENCODE ((offset) >> 2, 19, 5) \
+ | ENCODE ((cond), 4, 0))
+
+/* Write a CBZ or CBNZ instruction into *BUF.
+
+ CBZ rt, #offset
+ CBNZ rt, #offset
+
+ IS_CBNZ distinguishes between CBZ and CBNZ instructions.
+ RT is the register to test.
+ OFFSET is the immediate offset from the current PC. It is
+ byte-addressed but should be 4-byte aligned. It has a limited range of
+ +/- 1MB (19 bits << 2). */
+
+#define emit_cb(buf, is_cbnz, rt, offset) \
+ emit_insn (buf, \
+ ((is_cbnz) ? CBNZ : CBZ) \
+ | ENCODE (rt.is64, 1, 31) /* sf */ \
+ | ENCODE (offset >> 2, 19, 5) /* imm19 */ \
+ | ENCODE (rt.num, 5, 0))
+
+/* Write a LDR instruction into *BUF.
+
+ LDR rt, [rn, #offset]
+ LDR rt, [rn, #index]!
+ LDR rt, [rn], #index
+
+ RT is the destination register.
+ RN is the base address register.
+ OFFSET is the immediate to add to the base address. It is limited to
+ 0 .. 32760 range (12 bits << 3). */
+
+#define emit_ldr(buf, rt, rn, operand) \
+ emit_load_store (buf, rt.is64 ? 3 : 2, LDR, rt, rn, operand)
+
+/* Write a LDRSW instruction into *BUF. The register size is 64-bit.
+
+ LDRSW xt, [rn, #offset]
+ LDRSW xt, [rn, #index]!
+ LDRSW xt, [rn], #index
+
+ RT is the destination register.
+ RN is the base address register.
+ OFFSET is the immediate to add to the base address. It is limited to
+ 0 .. 16380 range (12 bits << 2). */
+
+#define emit_ldrsw(buf, rt, rn, operand) \
+ emit_load_store (buf, 3, LDRSW, rt, rn, operand)
+
+
+/* Write a TBZ or TBNZ instruction into *BUF.
+
+ TBZ rt, #bit, #offset
+ TBNZ rt, #bit, #offset
+
+ IS_TBNZ distinguishes between TBZ and TBNZ instructions.
+ RT is the register to test.
+ BIT is the index of the bit to test in register RT.
+ OFFSET is the immediate offset from the current PC. It is
+ byte-addressed but should be 4-byte aligned. It has a limited range of
+ +/- 32KB (14 bits << 2). */
+
+#define emit_tb(buf, is_tbnz, bit, rt, offset) \
+ emit_insn (buf, \
+ ((is_tbnz) ? TBNZ: TBZ) \
+ | ENCODE (bit >> 5, 1, 31) /* b5 */ \
+ | ENCODE (bit, 5, 19) /* b40 */ \
+ | ENCODE (offset >> 2, 14, 5) /* imm14 */ \
+ | ENCODE (rt.num, 5, 0))
+
+/* Write a NOP instruction into *BUF. */
+
+#define emit_nop(buf) emit_insn (buf, NOP)
+
+int emit_insn (uint32_t *buf, uint32_t insn);
+
+int emit_load_store (uint32_t *buf, uint32_t size,
+ enum aarch64_opcodes opcode,
+ struct aarch64_register rt,
+ struct aarch64_register rn,
+ struct aarch64_memory_operand operand);
+
#endif
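Finally, a brief sketch (not part of the patch) of how the emit_* macros above
might be strung together to fill a scratch buffer; the wrapper function, its
name, and the buffer handling are illustrative assumptions.

  /* Sketch: emit "tbnz x0, #3, .+8", "b .+16" and a NOP, assuming
     arch/aarch64-insn.h (above) has been included.  Each emitter returns the
     number of 32-bit instructions written, so the cursor advances by the
     return value.  Callers should first check that branch offsets fit the
     immediate ranges documented above (e.g. with can_encode_int32).  */
  static int
  emit_example (uint32_t *buf)
  {
    uint32_t *p = buf;
    struct aarch64_register x0 = { 0, 1 };  /* x0, 64-bit.  */

    p += emit_tb (p, 1 /* TBNZ */, 3 /* bit to test */, x0, 8 /* byte offset */);
    p += emit_b (p, 0 /* plain B, no link */, 16);
    p += emit_nop (p);

    return p - buf;  /* Number of instructions emitted.  */
  }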