aboutsummaryrefslogtreecommitdiff
path: root/gdb/testsuite/gdb.reverse
diff options
context:
space:
mode:
authorLuis Machado <lgustavo@codesourcery.com>2017-02-06 03:12:00 -0600
committerLuis Machado <lgustavo@codesourcery.com>2017-02-06 03:12:00 -0600
commit20b477a75c00de06a92b9577fd74416699d2c37f (patch)
tree954421dcc9219afb9b6e186c59f47a9a4f9288a3 /gdb/testsuite/gdb.reverse
parent3f7b46f2daa6c396564d786bda9c81e66d4b9278 (diff)
downloadgdb-20b477a75c00de06a92b9577fd74416699d2c37f.zip
gdb-20b477a75c00de06a92b9577fd74416699d2c37f.tar.gz
gdb-20b477a75c00de06a92b9577fd74416699d2c37f.tar.bz2
[BZ 21005] Add support for Intel 64 rdrand and rdseed record/replay
This patch addresses BZ 21005, which is gdb failing to recognize an rdrand instruction. It enables support for both rdrand and rdseed and handles extended register addressing (R8~R15) for 16-bit, 32-bit and 64-bit. gdb/ChangeLog 2017-02-06 Luis Machado <lgustavo@codesourcery.com> * NEWS: Mention support for record/replay of Intel 64 rdrand and rdseed instructions. i386-tdep.c (i386_process_record): Handle Intel 64 rdrand and rseed. gdb/testsuite/ChangeLog: 2017-02-06 Luis Machado <lgustavo@codesourcery.com> * gdb.reverse/insn-reverse.c: Include insn-reverse-x86.c. * gdb.reverse/insn-reverse-x86.c: New file.
Diffstat (limited to 'gdb/testsuite/gdb.reverse')
-rw-r--r--gdb/testsuite/gdb.reverse/insn-reverse-x86.c261
-rw-r--r--gdb/testsuite/gdb.reverse/insn-reverse.c2
2 files changed, 263 insertions, 0 deletions
diff --git a/gdb/testsuite/gdb.reverse/insn-reverse-x86.c b/gdb/testsuite/gdb.reverse/insn-reverse-x86.c
new file mode 100644
index 0000000..7f87392
--- /dev/null
+++ b/gdb/testsuite/gdb.reverse/insn-reverse-x86.c
@@ -0,0 +1,261 @@
+/* This testcase is part of GDB, the GNU debugger.
+
+ Copyright 2017 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+#include <cpuid.h>
+#include <stdint.h>
+
+/* Non-zero if the CPU supports rdrand (CPUID function 1, ECX bit 30)
+   and zero otherwise.  Set once by initialize () below; each testcase
+   checks it before executing the instruction.
+   NOTE(review): the original comment stated the inverted sense
+   ("0 if the CPU supports..."); check_rdrand_support returns 1 when
+   the feature bit is set.  */
+static unsigned int supports_rdrand;
+
+/* Returns non-zero if the rdrand instruction is supported (CPUID
+   function 1, ECX bit 30) and zero otherwise.
+
+   NOTE(review): this tests RDRAND only.  RDSEED availability is
+   reported by a separate bit (CPUID.(EAX=7,ECX=0):EBX bit 18), yet
+   rdseed () below is gated on this same flag -- on a CPU with rdrand
+   but not rdseed that testcase would raise SIGILL; confirm.  */
+
+static unsigned int
+check_rdrand_support (void)
+{
+ unsigned int rdrand_mask = (1 << 30);
+ unsigned int eax, ebx, ecx, edx;
+
+ __get_cpuid (1, &eax, &ebx, &ecx, &edx);
+ return ((ecx & rdrand_mask) == rdrand_mask);
+}
+
+/* Test rdrand support for various output registers.
+
+   Each asm statement below executes rdrand with a different
+   destination so that GDB's record/replay support is exercised
+   against every operand encoding: 16-, 32- and 64-bit widths,
+   including the REX-extended registers r8-r15.
+
+   NOTE(review): the asm statements clobber fixed registers while
+   declaring only a dummy "=r" output and no clobber list, so the
+   compiler is not told which registers are destroyed.  DI, SI, BP
+   and SP are manually saved to AX and restored around their rdrand
+   (they may hold pointers or the frame/stack pointer) -- confirm
+   this is sufficient at the optimization levels used by the
+   testsuite.
+
+   NOTE(review): insn-reverse.c includes this file for __i386__ as
+   well as __x86_64__, but the 64-bit registers used here (rax, rdi,
+   r8-r15, ...) and the "register uint64_t" variable do not assemble
+   in 32-bit mode -- the 64-bit sections likely need __x86_64__
+   guards.  */
+
+void
+rdrand (void)
+{
+ /* Get a random number from the rdrand assembly instruction. */
+ register uint64_t number;
+
+ if (!supports_rdrand)
+ return;
+
+ /* 16-bit random numbers. */
+ __asm__ volatile ("rdrand %%ax;" : "=r" (number));
+ __asm__ volatile ("rdrand %%bx;" : "=r" (number));
+ __asm__ volatile ("rdrand %%cx;" : "=r" (number));
+ __asm__ volatile ("rdrand %%dx;" : "=r" (number));
+
+ /* DI/SI/BP/SP are saved into AX before the rdrand that clobbers
+    them and restored immediately afterwards (AT&T syntax: source
+    operand first). */
+ __asm__ volatile ("mov %%di, %%ax;" : "=r" (number));
+ __asm__ volatile ("rdrand %%di;" : "=r" (number));
+ __asm__ volatile ("mov %%ax, %%di;" : "=r" (number));
+
+ __asm__ volatile ("mov %%si, %%ax;" : "=r" (number));
+ __asm__ volatile ("rdrand %%si;" : "=r" (number));
+ __asm__ volatile ("mov %%ax, %%si;" : "=r" (number));
+
+ __asm__ volatile ("mov %%bp, %%ax;" : "=r" (number));
+ __asm__ volatile ("rdrand %%bp;" : "=r" (number));
+ __asm__ volatile ("mov %%ax, %%bp;" : "=r" (number));
+
+ __asm__ volatile ("mov %%sp, %%ax;" : "=r" (number));
+ __asm__ volatile ("rdrand %%sp;" : "=r" (number));
+ __asm__ volatile ("mov %%ax, %%sp;" : "=r" (number));
+
+ /* REX-extended 16-bit registers. */
+ __asm__ volatile ("rdrand %%r8w;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r9w;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r10w;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r11w;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r12w;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r13w;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r14w;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r15w;" : "=r" (number));
+
+ /* 32-bit random numbers. */
+ __asm__ volatile ("rdrand %%eax;" : "=r" (number));
+ __asm__ volatile ("rdrand %%ebx;" : "=r" (number));
+ __asm__ volatile ("rdrand %%ecx;" : "=r" (number));
+ __asm__ volatile ("rdrand %%edx;" : "=r" (number));
+
+ /* Full 64-bit save/restore around the 32-bit rdrand: writing a
+    32-bit register zero-extends into the whole 64-bit register. */
+ __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (number));
+ __asm__ volatile ("rdrand %%edi;" : "=r" (number));
+ __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (number));
+
+ __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (number));
+ __asm__ volatile ("rdrand %%esi;" : "=r" (number));
+ __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (number));
+
+ __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (number));
+ __asm__ volatile ("rdrand %%ebp;" : "=r" (number));
+ __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (number));
+
+ __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (number));
+ __asm__ volatile ("rdrand %%esp;" : "=r" (number));
+ __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (number));
+
+ __asm__ volatile ("rdrand %%r8d;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r9d;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r10d;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r11d;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r12d;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r13d;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r14d;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r15d;" : "=r" (number));
+
+ /* 64-bit random numbers. */
+ __asm__ volatile ("rdrand %%rax;" : "=r" (number));
+ __asm__ volatile ("rdrand %%rbx;" : "=r" (number));
+ __asm__ volatile ("rdrand %%rcx;" : "=r" (number));
+ __asm__ volatile ("rdrand %%rdx;" : "=r" (number));
+
+ __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (number));
+ __asm__ volatile ("rdrand %%rdi;" : "=r" (number));
+ __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (number));
+
+ __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (number));
+ __asm__ volatile ("rdrand %%rsi;" : "=r" (number));
+ __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (number));
+
+ __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (number));
+ __asm__ volatile ("rdrand %%rbp;" : "=r" (number));
+ __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (number));
+
+ __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (number));
+ __asm__ volatile ("rdrand %%rsp;" : "=r" (number));
+ __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (number));
+
+ __asm__ volatile ("rdrand %%r8;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r9;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r10;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r11;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r12;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r13;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r14;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r15;" : "=r" (number));
+}
+
+/* Test rdseed support for various output registers.
+
+   Mirrors rdrand () above, substituting the rdseed instruction, so
+   that GDB's record/replay support is exercised for every rdseed
+   operand encoding (16-, 32- and 64-bit widths, r8-r15 included).
+
+   NOTE(review): execution is gated on supports_rdrand, which tests
+   CPUID.1:ECX bit 30 (RDRAND).  RDSEED has its own feature bit
+   (CPUID.(EAX=7):EBX bit 18); on hardware with rdrand but not
+   rdseed these instructions would raise SIGILL -- confirm.
+
+   NOTE(review): as in rdrand (), the asm statements clobber fixed
+   registers with no clobber list, and the 64-bit sections do not
+   assemble when this file is built for __i386__.  */
+
+void
+rdseed (void)
+{
+ /* Get a random seed from the rdseed assembly instruction. */
+ register long seed;
+
+ if (!supports_rdrand)
+ return;
+
+ /* 16-bit random seeds. */
+ __asm__ volatile ("rdseed %%ax;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%bx;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%cx;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%dx;" : "=r" (seed));
+
+ /* DI/SI/BP/SP are saved into AX and restored around the rdseed
+    that clobbers them. */
+ __asm__ volatile ("mov %%di, %%ax;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%di;" : "=r" (seed));
+ __asm__ volatile ("mov %%ax, %%di;" : "=r" (seed));
+
+ __asm__ volatile ("mov %%si, %%ax;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%si;" : "=r" (seed));
+ __asm__ volatile ("mov %%ax, %%si;" : "=r" (seed));
+
+ __asm__ volatile ("mov %%bp, %%ax;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%bp;" : "=r" (seed));
+ __asm__ volatile ("mov %%ax, %%bp;" : "=r" (seed));
+
+ __asm__ volatile ("mov %%sp, %%ax;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%sp;" : "=r" (seed));
+ __asm__ volatile ("mov %%ax, %%sp;" : "=r" (seed));
+
+ /* REX-extended 16-bit registers. */
+ __asm__ volatile ("rdseed %%r8w;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r9w;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r10w;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r11w;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r12w;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r13w;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r14w;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r15w;" : "=r" (seed));
+
+ /* 32-bit random seeds. */
+ __asm__ volatile ("rdseed %%eax;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%ebx;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%ecx;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%edx;" : "=r" (seed));
+
+ /* Full 64-bit save/restore: a 32-bit destination zero-extends
+    into the whole 64-bit register. */
+ __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%edi;" : "=r" (seed));
+ __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (seed));
+
+ __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%esi;" : "=r" (seed));
+ __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (seed));
+
+ __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%ebp;" : "=r" (seed));
+ __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (seed));
+
+ __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%esp;" : "=r" (seed));
+ __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (seed));
+
+ __asm__ volatile ("rdseed %%r8d;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r9d;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r10d;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r11d;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r12d;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r13d;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r14d;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r15d;" : "=r" (seed));
+
+ /* 64-bit random seeds. */
+ __asm__ volatile ("rdseed %%rax;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%rbx;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%rcx;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%rdx;" : "=r" (seed));
+
+ __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%rdi;" : "=r" (seed));
+ __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (seed));
+
+ __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%rsi;" : "=r" (seed));
+ __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (seed));
+
+ __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%rbp;" : "=r" (seed));
+ __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (seed));
+
+ __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%rsp;" : "=r" (seed));
+ __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (seed));
+
+ __asm__ volatile ("rdseed %%r8;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r9;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r10;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r11;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r12;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r13;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r14;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r15;" : "=r" (seed));
+}
+
+/* Initialize arch-specific bits.  Presumably invoked once by the
+   generic insn-reverse.c driver before the testcases run -- verify
+   against the caller.  */
+
+static void
+initialize (void)
+{
+ /* Cache rdrand support so each testcase can bail out early on
+    hardware without the instruction. */
+ supports_rdrand = check_rdrand_support ();
+}
+
+/* Functions testing instruction decodings.  GDB will test all of these.
+   testcase_ftype (void (*) (void)) is declared by the including file,
+   insn-reverse.c.  */
+static testcase_ftype testcases[] =
+{
+ rdrand,
+ rdseed
+};
diff --git a/gdb/testsuite/gdb.reverse/insn-reverse.c b/gdb/testsuite/gdb.reverse/insn-reverse.c
index 662a02b..26a22a9 100644
--- a/gdb/testsuite/gdb.reverse/insn-reverse.c
+++ b/gdb/testsuite/gdb.reverse/insn-reverse.c
@@ -24,6 +24,8 @@ typedef void (*testcase_ftype) (void);
#include "insn-reverse-aarch64.c"
#elif (defined __arm__)
#include "insn-reverse-arm.c"
+#elif (defined __x86_64__) || (defined __i386__)
+#include "insn-reverse-x86.c"
#else
/* We get here if the current architecture being tested doesn't have any
record/replay instruction decoding tests implemented. */