author    Guinevere Larsen <guinevere@redhat.com>  2024-10-30 16:16:42 -0300
committer Guinevere Larsen <guinevere@redhat.com>  2024-11-12 08:48:03 -0300
commit    09c964ce90335cb902f2e1bb0e9e48891df380c2 (patch)
tree      f11b5d7a1fc576f3ca98ec7b3fead7ed522a7b90
parent    a4a5f05266b69ebe8172433217d47cc7a737e51d (diff)
gdb/testsuite: fix gdb.reverse/i386-avx-reverse.exp with clang
The test gdb.reverse/i386-avx-reverse.exp was changed by the recent commit:

    commit 5bf288d5a88ab6d3fa9bd7bd070e624afd264dc6
    Author: Guinevere Larsen <guinevere@redhat.com>
    Date:   Fri Jul 26 17:31:14 2024 -0300

        gdb/record: support AVX instructions VMOVDQ(U|A) when recording

In that commit I added a few calls to the instruction vmovdqa to and from
memory addresses.  Because my local gcc testing always had aligned pointers,
I thought this would always work, but clang (and maybe other compilers)
might not do the same, which will cause vmovdqa to segfault and the test to
fail spectacularly.

This commit fixes that by using the pre-existing precise-aligned-alloc to
allocate the dynamic buffers, forcing them to be aligned to the required
boundary for the vmovdqa instruction to work.  The code was then re-shuffled
to keep the current clustering of instructions.

Approved-By: Tom Tromey <tom@tromey.com>
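For context, vmovdqa raises a fault when its 256-bit memory operand is not
32-byte aligned, while vmovdqu accepts any address; gcc happened to align the
test's buffers, but clang does not guarantee that, hence the crash.  A minimal
standalone sketch of the idea behind the fix, shown here with C11
aligned_alloc rather than the testsuite's precise-aligned-alloc helper
(illustration only; it assumes an AVX-capable machine and GCC/clang extended
asm):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    int
    main (void)
    {
      /* malloc typically gives only 16-byte alignment, which is not enough
         for vmovdqa on a ymm-sized operand; ask for 32 bytes explicitly.  */
      char *buf = aligned_alloc (32, 32);
      assert (buf != NULL && ((uintptr_t) buf % 32) == 0);

      /* With the pointer 32-byte aligned, the aligned load cannot fault.  */
      asm volatile ("vmovdqa %0, %%ymm0" : : "m" (*buf));

      free (buf);
      return 0;
    }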
-rw-r--r--  gdb/testsuite/gdb.reverse/i386-avx-reverse.c    21
-rw-r--r--  gdb/testsuite/gdb.reverse/i386-avx-reverse.exp  18
2 files changed, 23 insertions, 16 deletions
diff --git a/gdb/testsuite/gdb.reverse/i386-avx-reverse.c b/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
index b36de10..edd931b 100644
--- a/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
+++ b/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
@@ -82,23 +82,23 @@ vmov_test ()
asm volatile ("vmovq %0, %%xmm15": "=m" (buf1));
/* Test vmovdq style instructions. */
- /* For local and dynamic buffers, we can't guarantee they will be aligned.
+ /* For local and global buffers, we can't guarantee they will be aligned.
However, the aligned and unaligned versions seem to be encoded the same,
- so testing one is enough to validate both. */
+ so testing one is enough to validate both. For safety, though, the
+ dynamic buffers are forced to be 32-byte aligned so vmovdqa can be
+ explicitly tested at least once. */
/* Operations based on local buffers. */
asm volatile ("vmovdqu %0, %%ymm0": : "m"(buf0));
asm volatile ("vmovdqu %%ymm0, %0": "=m"(buf1));
/* Operations based on global buffers. */
- /* Global buffers seem to always be aligned, lets sanity check vmovdqa. */
- asm volatile ("vmovdqa %0, %%ymm15": : "m"(global_buf0));
- asm volatile ("vmovdqa %%ymm15, %0": "=m"(global_buf1));
asm volatile ("vmovdqu %0, %%ymm0": : "m"(global_buf0));
asm volatile ("vmovdqu %%ymm0, %0": "=m"(global_buf1));
/* Operations based on dynamic buffers. */
- /* The dynamic buffers are not aligned, so we skip vmovdqa. */
+ asm volatile ("vmovdqa %0, %%ymm15": : "m"(*dyn_buf0));
+ asm volatile ("vmovdqa %%ymm15, %0": "=m"(*dyn_buf1));
asm volatile ("vmovdqu %0, %%ymm0": : "m"(*dyn_buf0));
asm volatile ("vmovdqu %%ymm0, %0": "=m"(*dyn_buf1));
@@ -210,11 +210,16 @@ vzeroupper_test ()
return 0; /* end vzeroupper_test */
}
+/* This include is used to allocate the dynamic buffers and have
+   the pointers aligned to a 32-byte boundary, so we can test instructions
+   that require aligned memory.  */
+#include "precise-aligned-alloc.c"
+
int
main ()
{
- dyn_buf0 = (char *) malloc(sizeof(char) * 32);
- dyn_buf1 = (char *) malloc(sizeof(char) * 32);
+ dyn_buf0 = (char *) precise_aligned_alloc(32, sizeof(char) * 32, NULL);
+ dyn_buf1 = (char *) precise_aligned_alloc(32, sizeof(char) * 32, NULL);
for (int i =0; i < 32; i++)
{
dyn_buf0[i] = 0x20 + (i % 16);
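The precise-aligned-alloc.c helper included above lives under
gdb/testsuite/lib and is not part of this diff.  A rough sketch of what such
an over-allocate-and-round-up allocator could look like, inferred only from
the call sites above; the name, the free_pointer parameter, and the exact
behaviour are assumptions, not the testsuite's actual implementation:

    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical sketch: return SIZE bytes aligned to ALIGN (a power of
       two) by over-allocating and rounding the pointer up.  If FREE_POINTER
       is non-NULL, the raw allocation is stored there so the caller can
       free it later; the test above simply passes NULL.  */
    static void *
    aligned_alloc_sketch (size_t align, size_t size, void **free_pointer)
    {
      void *raw = malloc (size + align - 1);
      if (raw == NULL)
        return NULL;

      uintptr_t aligned = ((uintptr_t) raw + align - 1) & ~((uintptr_t) align - 1);
      if (free_pointer != NULL)
        *free_pointer = raw;
      return (void *) aligned;
    }

    int
    main (void)
    {
      void *to_free = NULL;
      char *buf = aligned_alloc_sketch (32, 32, &to_free);
      /* buf is now 32-byte aligned and safe for vmovdqa-style accesses.  */
      free (to_free);
      return buf != NULL ? 0 : 1;
    }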
diff --git a/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp b/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp
index 4aefbcd..7ed1293 100644
--- a/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp
+++ b/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp
@@ -39,9 +39,10 @@ standard_testfile
# some targets have leading underscores on assembly symbols.
set additional_flags [gdb_target_symbol_prefix_flags]
+lappend_include_file alloc_lib $srcdir/lib/precise-aligned-alloc.c
if {[prepare_for_testing "failed to prepare" $testfile $srcfile \
- [list debug $additional_flags]]} {
+ [list debug $additional_flags $alloc_lib]]} {
return -1
}
@@ -135,30 +136,31 @@ global decimal
if {[record_full_function "vmov"] == true} {
# Now execute backwards, checking all instructions.
test_one_register "vmovdqa" "ymm0" \
- "0x1f1e1d1c1b1a19181716151413121110, 0x1f1e1d1c1b1a19181716151413121110" \
+ "0x2f2e2d2c2b2a29282726252423222120, 0x2f2e2d2c2b2a29282726252423222120" \
"from register: "
test_one_register "vmovdqu" "ymm15" \
- "0x1f1e1d1c1b1a19181716151413121110, 0x1f1e1d1c1b1a19181716151413121110" \
+ "0x2f2e2d2c2b2a29282726252423222120, 0x2f2e2d2c2b2a29282726252423222120" \
"from register: "
test_one_register "vmovdqu" "ymm0" \
"0x2f2e2d2c2b2a29282726252423222120, 0x2f2e2d2c2b2a29282726252423222120" \
"from register: "
- test_one_memory "vmovdqu" "dyn_buf1" "0x0 .repeats 32 times" \
+ test_one_memory "vmovdqu" "dyn_buf1" \
+ "0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29" \
true "dynamic buffer: "
test_one_register "vmovdqu" "ymm0" \
"0x1f1e1d1c1b1a19181716151413121110, 0x1f1e1d1c1b1a19181716151413121110" \
"dynamic buffer: "
+ test_one_memory "vmovdqa" "dyn_buf1" "0x0 .repeats 32 times" true
+ test_one_register "vmovdqa" "ymm15" "0x0, 0x0"
# Don't check the full buffer because that'd be too long
test_one_memory "vmovdqu" "global_buf1" \
- "0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19" \
- "global buffer: "
+ "0x0 .repeats 32 times" \
+ false "global buffer: "
test_one_register "vmovdqu" "ymm0" \
"0x3f3e3d3c3b3a39383736353433323130, 0x3f3e3d3c3b3a39383736353433323130" \
"global buffer: "
- test_one_memory "vmovdqa" "global_buf1" "0x0 .repeats 32 times"
- test_one_register "vmovdqa" "ymm15" "0x0, 0x0"
test_one_memory "vmovdqu" "buf1" "0x0 .repeats 32 times"
test_one_register "vmovdqu" "ymm0" "0x2726252423222120, 0x0" "local buffer: "
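The new expected values in the .exp hunk follow directly from how main ()
fills the dynamic buffers: dyn_buf0 holds the byte pattern 0x20..0x2f
repeated twice, so a 32-byte load yields 0x2f2e2d2c2b2a29282726252423222120
in each 128-bit lane once the little-endian bytes are printed as an integer.
A small throwaway check of that arithmetic (illustration only):

    #include <stdio.h>

    int
    main (void)
    {
      char dyn_buf0[32];

      /* Same fill as the test's main (): bytes 0x20..0x2f, repeated.  */
      for (int i = 0; i < 32; i++)
        dyn_buf0[i] = 0x20 + (i % 16);

      /* Print one 128-bit lane as GDB shows ymm fields: most significant
         byte first.  */
      printf ("0x");
      for (int i = 15; i >= 0; i--)
        printf ("%02x", (unsigned char) dyn_buf0[i]);
      printf ("\n");  /* 0x2f2e2d2c2b2a29282726252423222120 */
      return 0;
    }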