author    | Luis Machado <lgustavo@codesourcery.com> | 2017-02-06 03:12:00 -0600
committer | Luis Machado <lgustavo@codesourcery.com> | 2017-02-06 03:12:00 -0600
commit    | 20b477a75c00de06a92b9577fd74416699d2c37f (patch)
tree      | 954421dcc9219afb9b6e186c59f47a9a4f9288a3
parent    | 3f7b46f2daa6c396564d786bda9c81e66d4b9278 (diff)
[BZ 21005] Add support for Intel 64 rdrand and rdseed record/replay
This patch addresses BZ 21005, in which GDB fails to recognize an rdrand
instruction.
It enables record/replay support for both rdrand and rdseed and handles extended
register addressing (R8~R15) for 16-bit, 32-bit and 64-bit operand sizes.
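
For reference, the encoding the i386-tdep.c change keys on works as follows: rdrand and
rdseed share the two-byte opcode 0x0f 0xc7 with cmpxchg8b and are told apart by the REG
field of the ModR/M byte (/6 for rdrand, /7 for rdseed), while the REX.B bit extends the
3-bit R/M field so the destination can be R8~R15.  The stand-alone sketch below only
illustrates that decoding; it is not code from GDB, and the enum, function and variable
names are invented for the example.

/* Illustrative sketch only: classify a 0x0f 0xc7 instruction from its
   ModR/M byte and an optional REX prefix, the way the record logic
   distinguishes rdrand (/6) from rdseed (/7) and resolves R8~R15.  */

#include <stdio.h>

enum insn_kind { INSN_OTHER, INSN_RDRAND, INSN_RDSEED };

static enum insn_kind
classify_0fc7 (unsigned char modrm, unsigned char rex, int *dest_regnum)
{
  int mod = (modrm >> 6) & 0x3;
  int reg = (modrm >> 3) & 0x7;   /* Opcode extension for 0x0f 0xc7.  */
  int rm = modrm & 0x7;
  int rex_b = rex & 0x1;          /* REX.B extends the R/M field.  */

  if (mod != 3)
    return INSN_OTHER;            /* rdrand/rdseed only exist in register form.  */
  if (reg != 6 && reg != 7)
    return INSN_OTHER;

  *dest_regnum = (rex_b << 3) | rm;   /* 0-7, or 8-15 when REX.B is set.  */
  return reg == 6 ? INSN_RDRAND : INSN_RDSEED;
}

int
main (void)
{
  int regnum;
  /* 0x0f 0xc7 0xf0 is "rdrand %eax"; with a 0x41 REX prefix (REX.B set)
     the same ModR/M byte selects %r8d instead.  */
  enum insn_kind kind = classify_0fc7 (0xf0, 0x41, &regnum);
  printf ("kind=%d dest=r%d\n", kind, regnum);
  return 0;
}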
gdb/ChangeLog
2017-02-06 Luis Machado <lgustavo@codesourcery.com>
* NEWS: Mention support for record/replay of Intel 64 rdrand and
rdseed instructions.
* i386-tdep.c (i386_process_record): Handle Intel 64 rdrand and rdseed.
gdb/testsuite/ChangeLog:
2017-02-06 Luis Machado <lgustavo@codesourcery.com>
* gdb.reverse/insn-reverse.c: Include insn-reverse-x86.c.
* gdb.reverse/insn-reverse-x86.c: New file.
-rw-r--r-- | gdb/ChangeLog                                | 6
-rw-r--r-- | gdb/NEWS                                     | 3
-rw-r--r-- | gdb/i386-tdep.c                              | 30
-rw-r--r-- | gdb/testsuite/ChangeLog                      | 5
-rw-r--r-- | gdb/testsuite/gdb.reverse/insn-reverse-x86.c | 261
-rw-r--r-- | gdb/testsuite/gdb.reverse/insn-reverse.c     | 2
6 files changed, 303 insertions, 4 deletions
diff --git a/gdb/ChangeLog b/gdb/ChangeLog
index 3e8f9c8..5751ee7 100644
--- a/gdb/ChangeLog
+++ b/gdb/ChangeLog
@@ -1,3 +1,9 @@
+2017-02-06  Luis Machado  <lgustavo@codesourcery.com>
+
+        * NEWS: Mention support for record/replay of Intel 64 rdrand and
+        rdseed instructions.
+        i386-tdep.c (i386_process_record): Handle Intel 64 rdrand and rseed.
+
 2017-02-06  Ivo Raisr  <ivo.raisr@oracle.com>
 
         PR tdep/20936
diff --git a/gdb/NEWS b/gdb/NEWS
--- a/gdb/NEWS
+++ b/gdb/NEWS
@@ -3,6 +3,9 @@
 *** Changes since GDB 7.12
 
+* GDB now supports recording and replaying rdrand and rdseed Intel 64
+  instructions.
+
 * Building GDB and GDBserver now requires a C++11 compiler.
   For example, GCC 4.8 or later.
diff --git a/gdb/i386-tdep.c b/gdb/i386-tdep.c
index b86c623..bd87720 100644
--- a/gdb/i386-tdep.c
+++ b/gdb/i386-tdep.c
@@ -5501,14 +5501,36 @@ i386_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
       I386_RECORD_FULL_ARCH_LIST_ADD_REG (X86_RECORD_EFLAGS_REGNUM);
       break;
 
-    case 0x0fc7:    /* cmpxchg8b */
+    case 0x0fc7:    /* cmpxchg8b / rdrand / rdseed */
       if (i386_record_modrm (&ir))
         return -1;
       if (ir.mod == 3)
         {
-          ir.addr -= 2;
-          opcode = opcode << 8 | ir.modrm;
-          goto no_support;
+          /* rdrand and rdseed use the 3 bits of the REG field of ModR/M as
+             an extended opcode.  rdrand has bits 110 (/6) and rdseed
+             has bits 111 (/7).  */
+          if (ir.reg == 6 || ir.reg == 7)
+            {
+              /* The storage register is described by the 3 R/M bits, but the
+                 REX.B prefix may be used to give access to registers
+                 R8~R15.  In this case ir.rex_b + R/M will give us the register
+                 in the range R8~R15.
+
+                 REX.W may also be used to access 64-bit registers, but we
+                 already record entire registers and not just partial bits
+                 of them.  */
+              I386_RECORD_FULL_ARCH_LIST_ADD_REG (ir.rex_b + ir.rm);
+              /* These instructions also set conditional bits.  */
+              I386_RECORD_FULL_ARCH_LIST_ADD_REG (X86_RECORD_EFLAGS_REGNUM);
+              break;
+            }
+          else
+            {
+              /* We don't handle this particular instruction yet.  */
+              ir.addr -= 2;
+              opcode = opcode << 8 | ir.modrm;
+              goto no_support;
+            }
         }
       I386_RECORD_FULL_ARCH_LIST_ADD_REG (X86_RECORD_REAX_REGNUM);
       I386_RECORD_FULL_ARCH_LIST_ADD_REG (X86_RECORD_REDX_REGNUM);
diff --git a/gdb/testsuite/ChangeLog b/gdb/testsuite/ChangeLog
index 9aeec87..f5a0a8e 100644
--- a/gdb/testsuite/ChangeLog
+++ b/gdb/testsuite/ChangeLog
@@ -1,3 +1,8 @@
+2017-02-06  Luis Machado  <lgustavo@codesourcery.com>
+
+        * gdb.reverse/insn-reverse.c: Include insn-reverse-x86.c.
+        * gdb.reverse/insn-reverse-x86.c: New file.
+
 2017-02-06  Ivo Raisr  <ivo.raisr@oracle.com>
 
         PR tdep/20936
diff --git a/gdb/testsuite/gdb.reverse/insn-reverse-x86.c b/gdb/testsuite/gdb.reverse/insn-reverse-x86.c
new file mode 100644
index 0000000..7f87392
--- /dev/null
+++ b/gdb/testsuite/gdb.reverse/insn-reverse-x86.c
@@ -0,0 +1,261 @@
+/* This testcase is part of GDB, the GNU debugger.
+
+   Copyright 2017 Free Software Foundation, Inc.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
+
+#include <cpuid.h>
+#include <stdint.h>
+
+/* 0 if the CPU supports rdrand/rdseed and non-zero otherwise.  */
+static unsigned int supports_rdrand;
+
+/* Returns non-zero if rdrand/rdseed instructions are supported and
+   zero otherwise.  */
+
+static unsigned int
+check_rdrand_support (void)
+{
+  unsigned int rdrand_mask = (1 << 30);
+  unsigned int eax, ebx, ecx, edx;
+
+  __get_cpuid (1, &eax, &ebx, &ecx, &edx);
+  return ((ecx & rdrand_mask) == rdrand_mask);
+}
+
+/* Test rdrand support for various output registers.  */
+
+void
+rdrand (void)
+{
+  /* Get a random number from the rdrand assembly instruction.  */
+  register uint64_t number;
+
+  if (!supports_rdrand)
+    return;
+
+  /* 16-bit random numbers.  */
+  __asm__ volatile ("rdrand %%ax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%bx;" : "=r" (number));
+  __asm__ volatile ("rdrand %%cx;" : "=r" (number));
+  __asm__ volatile ("rdrand %%dx;" : "=r" (number));
+
+  __asm__ volatile ("mov %%di, %%ax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%di;" : "=r" (number));
+  __asm__ volatile ("mov %%ax, %%di;" : "=r" (number));
+
+  __asm__ volatile ("mov %%si, %%ax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%si;" : "=r" (number));
+  __asm__ volatile ("mov %%ax, %%si;" : "=r" (number));
+
+  __asm__ volatile ("mov %%bp, %%ax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%bp;" : "=r" (number));
+  __asm__ volatile ("mov %%ax, %%bp;" : "=r" (number));
+
+  __asm__ volatile ("mov %%sp, %%ax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%sp;" : "=r" (number));
+  __asm__ volatile ("mov %%ax, %%sp;" : "=r" (number));
+
+  __asm__ volatile ("rdrand %%r8w;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r9w;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r10w;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r11w;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r12w;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r13w;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r14w;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r15w;" : "=r" (number));
+
+  /* 32-bit random numbers.  */
+  __asm__ volatile ("rdrand %%eax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%ebx;" : "=r" (number));
+  __asm__ volatile ("rdrand %%ecx;" : "=r" (number));
+  __asm__ volatile ("rdrand %%edx;" : "=r" (number));
+
+  __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%edi;" : "=r" (number));
+  __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (number));
+
+  __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%esi;" : "=r" (number));
+  __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (number));
+
+  __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%ebp;" : "=r" (number));
+  __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (number));
+
+  __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%esp;" : "=r" (number));
+  __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (number));
+
+  __asm__ volatile ("rdrand %%r8d;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r9d;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r10d;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r11d;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r12d;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r13d;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r14d;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r15d;" : "=r" (number));
+
+  /* 64-bit random numbers.  */
+  __asm__ volatile ("rdrand %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%rbx;" : "=r" (number));
+  __asm__ volatile ("rdrand %%rcx;" : "=r" (number));
+  __asm__ volatile ("rdrand %%rdx;" : "=r" (number));
+
+  __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%rdi;" : "=r" (number));
+  __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (number));
+
+  __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%rsi;" : "=r" (number));
+  __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (number));
+
+  __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%rbp;" : "=r" (number));
+  __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (number));
+
+  __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (number));
+  __asm__ volatile ("rdrand %%rsp;" : "=r" (number));
+  __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (number));
+
+  __asm__ volatile ("rdrand %%r8;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r9;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r10;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r11;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r12;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r13;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r14;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r15;" : "=r" (number));
+}
+
+/* Test rdseed support for various output registers.  */
+
+void
+rdseed (void)
+{
+  /* Get a random seed from the rdseed assembly instruction.  */
+  register long seed;
+
+  if (!supports_rdrand)
+    return;
+
+  /* 16-bit random seeds.  */
+  __asm__ volatile ("rdseed %%ax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%bx;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%cx;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%dx;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%di, %%ax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%di;" : "=r" (seed));
+  __asm__ volatile ("mov %%ax, %%di;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%si, %%ax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%si;" : "=r" (seed));
+  __asm__ volatile ("mov %%ax, %%si;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%bp, %%ax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%bp;" : "=r" (seed));
+  __asm__ volatile ("mov %%ax, %%bp;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%sp, %%ax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%sp;" : "=r" (seed));
+  __asm__ volatile ("mov %%ax, %%sp;" : "=r" (seed));
+
+  __asm__ volatile ("rdseed %%r8w;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r9w;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r10w;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r11w;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r12w;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r13w;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r14w;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r15w;" : "=r" (seed));
+
+  /* 32-bit random seeds.  */
+  __asm__ volatile ("rdseed %%eax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%ebx;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%ecx;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%edx;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%edi;" : "=r" (seed));
+  __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%esi;" : "=r" (seed));
+  __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%ebp;" : "=r" (seed));
+  __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%esp;" : "=r" (seed));
+  __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (seed));
+
+  __asm__ volatile ("rdseed %%r8d;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r9d;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r10d;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r11d;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r12d;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r13d;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r14d;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r15d;" : "=r" (seed));
+
+  /* 64-bit random seeds.  */
+  __asm__ volatile ("rdseed %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%rbx;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%rcx;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%rdx;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%rdi;" : "=r" (seed));
+  __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%rsi;" : "=r" (seed));
+  __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%rbp;" : "=r" (seed));
+  __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (seed));
+
+  __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%rsp;" : "=r" (seed));
+  __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (seed));
+
+  __asm__ volatile ("rdseed %%r8;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r9;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r10;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r11;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r12;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r13;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r14;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r15;" : "=r" (seed));
+}
+
+/* Initialize arch-specific bits.  */
+
+static void
+initialize (void)
+{
+  /* Initialize supported features.  */
+  supports_rdrand = check_rdrand_support ();
+}
+
+/* Functions testing instruction decodings.  GDB will test all of these.  */
+static testcase_ftype testcases[] =
+{
+  rdrand,
+  rdseed
+};
diff --git a/gdb/testsuite/gdb.reverse/insn-reverse.c b/gdb/testsuite/gdb.reverse/insn-reverse.c
index 662a02b..26a22a9 100644
--- a/gdb/testsuite/gdb.reverse/insn-reverse.c
+++ b/gdb/testsuite/gdb.reverse/insn-reverse.c
@@ -24,6 +24,8 @@ typedef void (*testcase_ftype) (void);
 #include "insn-reverse-aarch64.c"
 #elif (defined __arm__)
 #include "insn-reverse-arm.c"
+#elif (defined __x86_64__) || (defined __i386__)
+#include "insn-reverse-x86.c"
 #else
 /* We get here if the current architecture being tested doesn't have any
    record/replay instruction decoding tests implemented.  */
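
As background on why the record logic above adds EFLAGS to the recorded state: rdrand and
rdseed report through the carry flag whether a value was actually delivered, so callers
normally test CF immediately after the instruction, and reverse execution has to be able to
restore both the destination register and the flags.  The stand-alone snippet below shows
that usage pattern; it is not part of the patch or of the test file above, the helper name
is invented, and it assumes a 64-bit x86 target with GCC-style inline asm.

#include <stdint.h>

/* Attempt one rdrand; returns non-zero and stores the value on success
   (CF set), zero if the hardware had no random value available.  */

static int
try_rdrand64 (uint64_t *value)
{
  unsigned char ok;

  __asm__ volatile ("rdrand %0; setc %1"
                    : "=r" (*value), "=qm" (ok));
  return ok;
}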