aboutsummaryrefslogtreecommitdiff
path: root/gdb/arch/aarch64-insn.c
diff options
context:
space:
mode:
authorLuis Machado <luis.machado@linaro.org>2020-12-02 11:29:30 -0300
committerLuis Machado <luis.machado@linaro.org>2020-12-04 11:17:00 -0300
commit5382f97180f5be551868449e411a4daaebf232fb (patch)
tree8cc4c6f8a8d23877ce212441c8946122c829c630 /gdb/arch/aarch64-insn.c
parent67748e0f666f0645d7f182e1365f4d9859e55f1d (diff)
downloadgdb-5382f97180f5be551868449e411a4daaebf232fb.zip
gdb-5382f97180f5be551868449e411a4daaebf232fb.tar.gz
gdb-5382f97180f5be551868449e411a4daaebf232fb.tar.bz2
Fix shifting of negative value
When UBSan is enabled, I noticed runtime errors complaining of shifting of negative numbers. This patch fixes this by reusing existing macros from the ARM port. It also removes unused macros from AArch64's port. gdb/ChangeLog: 2020-12-04 Luis Machado <luis.machado@linaro.org> * aarch64-tdep.c (submask, bit, bits): Remove. * arch/aarch64-insn.c (extract_signed_bitfield): Remove. (aarch64_decode_adr, aarch64_decode_b, aarch64_decode_bcond) (aarch64_decode_cb, aarch64_decode_tb) (aarch64_decode_ldr_literal): Use sbits to extract a signed immediate. * arch/aarch64-insn.h (submask, bits, bit, sbits): New macros.
Diffstat (limited to 'gdb/arch/aarch64-insn.c')
-rw-r--r--gdb/arch/aarch64-insn.c31
1 files changed, 6 insertions, 25 deletions
diff --git a/gdb/arch/aarch64-insn.c b/gdb/arch/aarch64-insn.c
index 711500a..b7c5a60 100644
--- a/gdb/arch/aarch64-insn.c
+++ b/gdb/arch/aarch64-insn.c
@@ -22,25 +22,6 @@
/* Toggle this file's internal debugging dump. */
bool aarch64_debug = false;
-/* Extract a signed value from a bit field within an instruction
- encoding.
-
- INSN is the instruction opcode.
-
- WIDTH specifies the width of the bit field to extract (in bits).
-
- OFFSET specifies the least significant bit of the field where bits
- are numbered zero counting from least to most significant. */
-
-static int32_t
-extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
-{
- unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
- unsigned shift_r = sizeof (int32_t) * 8 - width;
-
- return ((int32_t) insn << shift_l) >> shift_r;
-}
-
/* Determine if specified bits within an instruction opcode matches a
specific pattern.
@@ -74,7 +55,7 @@ aarch64_decode_adr (CORE_ADDR addr, uint32_t insn, int *is_adrp,
if (decode_masked_match (insn, 0x1f000000, 0x10000000))
{
uint32_t immlo = (insn >> 29) & 0x3;
- int32_t immhi = extract_signed_bitfield (insn, 19, 5) << 2;
+ int32_t immhi = sbits (insn, 5, 23) * 4;
*is_adrp = (insn >> 31) & 0x1;
*rd = (insn >> 0) & 0x1f;
@@ -118,7 +99,7 @@ aarch64_decode_b (CORE_ADDR addr, uint32_t insn, int *is_bl,
if (decode_masked_match (insn, 0x7c000000, 0x14000000))
{
*is_bl = (insn >> 31) & 0x1;
- *offset = extract_signed_bitfield (insn, 26, 0) << 2;
+ *offset = sbits (insn, 0, 25) * 4;
if (aarch64_debug)
{
@@ -151,7 +132,7 @@ aarch64_decode_bcond (CORE_ADDR addr, uint32_t insn, unsigned *cond,
if (decode_masked_match (insn, 0xff000010, 0x54000000))
{
*cond = (insn >> 0) & 0xf;
- *offset = extract_signed_bitfield (insn, 19, 5) << 2;
+ *offset = sbits (insn, 5, 23) * 4;
if (aarch64_debug)
{
@@ -186,7 +167,7 @@ aarch64_decode_cb (CORE_ADDR addr, uint32_t insn, int *is64, int *is_cbnz,
*rn = (insn >> 0) & 0x1f;
*is64 = (insn >> 31) & 0x1;
*is_cbnz = (insn >> 24) & 0x1;
- *offset = extract_signed_bitfield (insn, 19, 5) << 2;
+ *offset = sbits (insn, 5, 23) * 4;
if (aarch64_debug)
{
@@ -222,7 +203,7 @@ aarch64_decode_tb (CORE_ADDR addr, uint32_t insn, int *is_tbnz,
*rt = (insn >> 0) & 0x1f;
*is_tbnz = (insn >> 24) & 0x1;
*bit = ((insn >> (31 - 4)) & 0x20) | ((insn >> 19) & 0x1f);
- *imm = extract_signed_bitfield (insn, 14, 5) << 2;
+ *imm = sbits (insn, 5, 18) * 4;
if (aarch64_debug)
{
@@ -267,7 +248,7 @@ aarch64_decode_ldr_literal (CORE_ADDR addr, uint32_t insn, int *is_w,
*is64 = (insn >> 30) & 0x1;
*rt = (insn >> 0) & 0x1f;
- *offset = extract_signed_bitfield (insn, 19, 5) << 2;
+ *offset = sbits (insn, 5, 23) * 4;
if (aarch64_debug)
debug_printf ("decode: %s 0x%x %s %s%u, #?\n",