author | Guinevere Larsen <guinevere@redhat.com> | 2025-01-15 17:32:23 -0300
---|---|---
committer | Guinevere Larsen <guinevere@redhat.com> | 2025-01-17 15:43:29 -0300
commit | 69c9ec4dc1371a40b1a725167999b749ecf6dbfc (patch) |
tree | 27614b93d8991d92e23ad7f3f90b5d03ac6ee4a8 |
parent | 56535ffb619362d5f492fddc15aa6aaee47baab4 (diff) |
gdb/reverse: Fix recording vmov[u|a]p[s|d] instructions
Tom de Vries reported that some of the tests for the vmov[u|a]p[s|d]
instructions were failing. On my machine xmm3 was consistently set to 0x54,
but apparently that value differs from system to system. This commit
initializes xmm3 at the start of the test instead, so the expected value no
longer depends on the environment.
While debugging the test failures, I also noticed an issue where the
recording wasn't saving all the required memory. That happened because
vmovs[s|d] shares its opcode with vmovup[s|d], meaning they share a code
path, but the latter encodes the size of the memory modification in VEX.L
whereas the former encodes it in VEX.pp. This commit fixes that and makes
the relevant tests more robust and complete.
Bug: https://sourceware.org/bugzilla/show_bug.cgi?id=32561
Approved-By: Guinevere Larsen <guinevere@redhat.com>
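
To make the size selection concrete, here is a minimal standalone sketch of the rule the fix implements. The helper name `store_size_bytes` is made up for illustration; GDB itself stores the base-2 log of the size in `ir->ot` rather than a byte count.

```c
#include <stdio.h>

/* Rough sketch (not GDB's code) of how many bytes a VEX-encoded
   0x11/0x29 store writes to memory.  'pp' and 'l' stand for the
   VEX.pp and VEX.L prefix fields discussed above.  */
static int
store_size_bytes (unsigned char opcode, unsigned char pp, unsigned char l)
{
  /* Opcode 0x11 with the high bit of VEX.pp set is vmovss (pp = 2,
     4 bytes) or vmovsd (pp = 3, 8 bytes): the size is encoded in pp.  */
  if (opcode == 0x11 && (pp & 2) != 0)
    return 1 << pp;

  /* Otherwise (vmovup[s|d] or vmovap[s|d]) the size comes from VEX.L:
     16 bytes for an xmm store, 32 bytes for a ymm store.  */
  return l ? 32 : 16;
}

int
main (void)
{
  printf ("vmovsd store:      %d bytes\n", store_size_bytes (0x11, 3, 0));
  printf ("vmovupd xmm store: %d bytes\n", store_size_bytes (0x11, 1, 0));
  printf ("vmovupd ymm store: %d bytes\n", store_size_bytes (0x11, 1, 1));
  printf ("vmovaps ymm store: %d bytes\n", store_size_bytes (0x29, 0, 1));
  return 0;
}
```

GDB expresses the same decision as a log2 value (`ir->ot = ir->pp` in the scalar case, `ir->ot = 4 + ir->l` in the packed case), which the subsequent `i386_record_lea_modrm` call uses to record the right number of bytes.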
-rw-r--r-- | gdb/i386-tdep.c | 13
-rw-r--r-- | gdb/testsuite/gdb.reverse/i386-avx-reverse.c | 17
-rw-r--r-- | gdb/testsuite/gdb.reverse/i386-avx-reverse.exp | 19
3 files changed, 29 insertions, 20 deletions
diff --git a/gdb/i386-tdep.c b/gdb/i386-tdep.c
index 9b8de48..1d6d592 100644
--- a/gdb/i386-tdep.c
+++ b/gdb/i386-tdep.c
@@ -4842,10 +4842,15 @@ i386_record_vex (struct i386_record_s *ir, uint8_t vex_w, uint8_t vex_r,
         }
       else
         {
-          /* VEX.pp stores whether we're moving a single or double precision
-             float.  It just happens that pp is exactly one less than
-             log2(size) so we can use that directly.  */
-          ir->ot = ir->pp;
+          /* Opcode 0x29 is trivial, the size of memory written is defined by
+             VEX.L.  Opcode 0x11 can refer to vmovs[s|d] or vmovup[s|d]; they
+             are differentiated by the most significant bit of VEX.pp, and the
+             latter works exactly like 0x29, but the former encodes the size
+             on VEX.pp itself.  */
+          if (opcode == 0x11 && (ir->pp & 2) != 0)
+            ir->ot = ir->pp;
+          else
+            ir->ot = 4 + ir->l;
           i386_record_lea_modrm (ir);
         }
       break;
diff --git a/gdb/testsuite/gdb.reverse/i386-avx-reverse.c b/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
index 0c26bcd..8156575 100644
--- a/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
+++ b/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
@@ -149,24 +149,24 @@ vmov_test ()
      opcodes, meaning they'll need to be tested separately.  */
 
   asm volatile ("vmovups %0, %%xmm0" : : "m"(buf0));
-  asm volatile ("vmovupd %0, %%xmm15" : : "m"(buf1));
+  asm volatile ("vmovupd %0, %%ymm15" : : "m"(buf1));
   asm volatile ("vmovupd %%xmm0, %0" : : "m"(buf1));
-  asm volatile ("vmovups %%xmm15, %0" : : "m"(buf1));
+  asm volatile ("vmovups %%ymm15, %0" : : "m"(buf1));
 
   asm volatile ("vmovups %0, %%xmm0" : : "m"(global_buf0));
-  asm volatile ("vmovupd %0, %%xmm15" : : "m"(global_buf1));
+  asm volatile ("vmovupd %0, %%ymm15" : : "m"(global_buf1));
   asm volatile ("vmovupd %%xmm0, %0" : : "m"(global_buf1));
-  asm volatile ("vmovups %%xmm15, %0" : : "m"(global_buf1));
+  asm volatile ("vmovups %%ymm15, %0" : : "m"(global_buf1));
 
   asm volatile ("vmovups %0, %%xmm0" : : "m"(*dyn_buf0));
-  asm volatile ("vmovupd %0, %%xmm15" : : "m"(*dyn_buf1));
+  asm volatile ("vmovupd %0, %%ymm15" : : "m"(*dyn_buf1));
   asm volatile ("vmovupd %%xmm0, %0" : : "m"(*dyn_buf1));
-  asm volatile ("vmovups %%xmm15, %0" : : "m"(*dyn_buf1));
+  asm volatile ("vmovups %%ymm15, %0" : : "m"(*dyn_buf1));
 
   asm volatile ("vmovaps %0, %%xmm0" : : "m"(*dyn_buf0));
-  asm volatile ("vmovapd %0, %%xmm15" : : "m"(*dyn_buf1));
+  asm volatile ("vmovapd %0, %%ymm15" : : "m"(*dyn_buf1));
   asm volatile ("vmovapd %%xmm0, %0" : : "m"(*dyn_buf1));
-  asm volatile ("vmovaps %%xmm15, %0" : : "m"(*dyn_buf1));
+  asm volatile ("vmovaps %%ymm15, %0" : : "m"(*dyn_buf1));
 
   /* We have a return statement to deal with
      epilogue in different compilers.  */
@@ -438,6 +438,7 @@ main ()
   asm volatile ("vmovq %0, %%xmm0": : "m" (global_buf1));
   asm volatile ("vmovq %0, %%xmm1": : "m" (global_buf1));
   asm volatile ("vmovq %0, %%xmm2": : "m" (global_buf1));
+  asm volatile ("vmovq %0, %%xmm3": : "m" (global_buf1));
   asm volatile ("vmovq %0, %%xmm15": : "m" (global_buf1));
 
   vmov_test ();
diff --git a/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp b/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp
index 2b2371d..f927960 100644
--- a/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp
+++ b/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp
@@ -76,6 +76,8 @@ proc test_one_general_register {insn register value {prefix ""}} {
 
 # Shorthand to test reversing through one instruction and
 # testing if a variable has the expected value.
+# Value should always be the start of the stored values in the memory,
+# not something found in the middle of it.
 # Prefix, if used, should end with a colon and space.
 proc test_one_memory {insn mem value {dynamic false} {prefix ""}} {
@@ -147,31 +149,32 @@ global decimal
 if {[record_full_function "vmov"] == true} {
     # Now execute backwards, checking all instructions.
+    # Explicitly test for the start of the array, since the value repeats.
     test_one_memory "vmovaps" "dyn_buf1" \
-        "0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28" true
+        "\\\{0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28" true
     test_one_memory "vmovapd" "dyn_buf1" \
-        "0x54, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x28" true
+        "\\\{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x28" true
     test_one_register "vmovapd" "xmm15" ".*" "dynamic buffer: "
     test_one_register "vmovaps" "xmm0" ".*" "dynamic buffer: "
 
     test_one_memory "vmovups" "dyn_buf1" \
-        "0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28" true
+        "\\\{0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28" true
     test_one_memory "vmovupd" "dyn_buf1" \
-        "0x54, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x28" true
+        "\\\{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x28" true
     test_one_register "vmovupd" "xmm15" ".*" "dynamic buffer: "
     test_one_register "vmovups" "xmm0" ".*" "dynamic buffer: "
 
     test_one_memory "vmovups" "global_buf1" \
-        "0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18"
+        "\\\{0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18"
     test_one_memory "vmovupd" "global_buf1" \
-        "0x54, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x18"
+        "\\\{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x18"
     test_one_register "vmovupd" "xmm15" ".*" "global buffer: "
     test_one_register "vmovups" "xmm0" ".*" "global buffer: "
 
     test_one_memory "vmovups" "buf1" \
-        "0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38"
+        "\\\{0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38"
     test_one_memory "vmovupd" "buf1" \
-        "0x54, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x38"
+        "\\\{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x38"
     test_one_register "vmovupd" "xmm15" "0xbff8000000000000" "local buffer: "
     test_one_register "vmovups" "xmm0" "0xc004000000000000" "local buffer: "
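
Using ymm15 in some of the stores above means those instructions write 32 bytes of memory, all of which the recorder must save so reverse execution can restore them. The snippet below is a standalone illustration, not part of the GDB testsuite; it assumes an AVX-capable CPU and GCC-style inline asm (built with something like `gcc -mavx`), mirroring the style of the test program.

```c
/* Illustration of the 32-byte footprint of a ymm store.  */
#include <stdio.h>
#include <string.h>

int
main (void)
{
  unsigned char src[32], dst[32];

  for (int i = 0; i < 32; i++)
    src[i] = 0x20 + i;
  memset (dst, 0, sizeof (dst));

  /* Load 32 bytes into ymm0, then store them to dst.  */
  asm volatile ("vmovupd %0, %%ymm0" : : "m" (src));
  asm volatile ("vmovupd %%ymm0, %0" : "=m" (dst));

  /* All 32 bytes of dst are overwritten; this is exactly the region a
     reverse debugger has to remember before the store executes.  */
  for (int i = 0; i < 32; i++)
    printf ("%#x ", dst[i]);
  printf ("\n");
  return 0;
}
```

With the old recording code the saved size for these packed stores was derived from VEX.pp instead of VEX.L, so only part of the destination was remembered; the expected values in the .exp file are now anchored at the start of the array (the leading `\\\{`) so a partially restored, repeating buffer cannot match by accident.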