author     Guinevere Larsen <guinevere@redhat.com>  2025-01-06 15:24:25 -0300
committer  Guinevere Larsen <guinevere@redhat.com>  2025-01-14 14:47:56 -0300
commit     2d42150b06529481967e8ffd306a29fce9b24f4c (patch)
tree       fd70833e74e600bb1ca5d072c61599cd5747073f /gdb/testsuite/gdb.reverse
parent     fb3365afbace3ddd562508db9e6f82fe9f258fd1 (diff)
gdb/record: add support for floating point vmov instructions
This commit updates GDB's record-full to be able to record the vmov[ss|sd]
and vmov[u|a][ps|pd] AVX instructions, and adds tests for them.

Unlike the vmovdq[u|a] instructions, the aligned and unaligned versions of
vmov[u|a][ps|pd] have different opcodes.  The mechanics of recording them
are the same, but the aligned version has opcodes 0x28 and 0x29, while the
unaligned version has the same opcodes as the vmov[ss|sd] instructions,
0x10 and 0x11.

Approved-By: Guinevere Larsen <guinevere@redhat.com>
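The recording rule the message describes boils down to: the load opcodes
(0x10 and 0x28) overwrite the destination XMM/YMM register, so the recorder
must save that register, while the store opcodes (0x11 and 0x29) overwrite
the destination memory operand, except when the ModRM byte encodes the
register-to-register vmovs[sd] form.  Here is a minimal standalone sketch
of that dispatch (hypothetical names, not the actual record-full code in
gdb/i386-tdep.c):

#include <stdio.h>

/* What a vmov instruction will clobber, and therefore what a recorder
   must save before letting the instruction execute.  */
enum save_kind { SAVE_REG, SAVE_MEM };

static enum save_kind
vmov_save_kind (unsigned char opcode, int modrm_is_reg)
{
  switch (opcode)
    {
    case 0x10:	/* vmovs[sd] / vmovup[sd], load form.  */
    case 0x28:	/* vmovap[sd], load form.  */
      return SAVE_REG;	/* The destination register is overwritten.  */
    case 0x11:	/* vmovs[sd] / vmovup[sd], store form.  */
    case 0x29:	/* vmovap[sd], store form.  */
      /* With ModRM mod == 3 the "memory" operand is really a register.  */
      return modrm_is_reg ? SAVE_REG : SAVE_MEM;
    default:
      return SAVE_REG;	/* Not reached in this sketch.  */
    }
}

int
main (void)
{
  printf ("0x10 -> %s\n", vmov_save_kind (0x10, 0) == SAVE_REG
	  ? "save register" : "save memory");
  printf ("0x11 -> %s\n", vmov_save_kind (0x11, 0) == SAVE_REG
	  ? "save register" : "save memory");
  printf ("0x29 -> %s\n", vmov_save_kind (0x29, 0) == SAVE_REG
	  ? "save register" : "save memory");
  return 0;
}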
Diffstat (limited to 'gdb/testsuite/gdb.reverse')
-rw-r--r--  gdb/testsuite/gdb.reverse/i386-avx-reverse.c   | 61
-rw-r--r--  gdb/testsuite/gdb.reverse/i386-avx-reverse.exp | 57
2 files changed, 118 insertions, 0 deletions
diff --git a/gdb/testsuite/gdb.reverse/i386-avx-reverse.c b/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
index 5ce363f..9d82bb0 100644
--- a/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
+++ b/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
@@ -107,6 +107,67 @@ vmov_test ()
asm volatile ("vmovdqu %ymm2, %ymm15");
asm volatile ("vmovdqa %ymm15, %ymm0");
+ /* Testing vmov[ss|sd] instructions.  */
+ /* Note, vmovss only works with XMM registers, not YMM registers,
+    according to the Intel manual.  Also, initializing the variables
+    uses xmm0 on my machine, so we can't test with it; use xmm1
+    instead.  */
+
+ /* Move single precision floats to and from memory. */
+ float f1 = 1.5, f2 = 4.2;
+ asm volatile ("vmovss %0, %%xmm1" : : "m"(f1));
+ asm volatile ("vmovss %0, %%xmm15": : "m"(f2));
+ asm volatile ("vmovss %%xmm1, %0" : "=m"(f2));
+ asm volatile ("vmovss %%xmm15, %0": "=m"(f1));
+
+ asm volatile ("vmovss %xmm15, %xmm1, %xmm2");
+ asm volatile ("vmovss %xmm15, %xmm1, %xmm8");
+ asm volatile ("vmovss %xmm1, %xmm2, %xmm15");
+ asm volatile ("vmovss %xmm2, %xmm15, %xmm1");
+
+ /* Testing double precision floats. */
+ double d1 = -1.5, d2 = -2.5;
+ asm volatile ("vmovsd %0, %%xmm1" : : "m"(d1));
+ asm volatile ("vmovsd %0, %%xmm15": : "m"(d2));
+ asm volatile ("vmovsd %%xmm1, %0" : "=m"(d2));
+ asm volatile ("vmovsd %%xmm15, %0": "=m"(d1));
+
+ asm volatile ("vmovsd %xmm15, %xmm1, %xmm2");
+ asm volatile ("vmovsd %xmm15, %xmm1, %xmm8");
+ asm volatile ("vmovsd %xmm1, %xmm2, %xmm15");
+ asm volatile ("vmovsd %xmm2, %xmm15, %xmm1");
+
+ /* "reset" all the buffers. This doesn't zero them all, but
+ it zeroes the start which lets us ensure the tests see
+ some changes. */
+ asm volatile ("vmovq %%xmm3, %0": "=m" (buf1));
+ asm volatile ("vmovq %%xmm3, %0": "=m" (global_buf1));
+ asm volatile ("vmovq %%xmm3, %0": "=m" (*dyn_buf1));
+
+ /* Testing vmovu[ps|pd] instructions.  Unlike vmovdq[u|a], where the
+    aligned and unaligned variants share an opcode and differ only in
+    the prefix, the aligned vmova[ps|pd] forms have their own opcodes,
+    so they need to be recorded and tested separately.  */
+
+ asm volatile ("vmovups %0, %%xmm0" : : "m"(buf0));
+ asm volatile ("vmovupd %0, %%xmm15" : : "m"(buf1));
+ asm volatile ("vmovupd %%xmm0, %0" : : "m"(buf1));
+ asm volatile ("vmovups %%xmm15, %0" : : "m"(buf1));
+
+ asm volatile ("vmovups %0, %%xmm0" : : "m"(global_buf0));
+ asm volatile ("vmovupd %0, %%xmm15" : : "m"(global_buf1));
+ asm volatile ("vmovupd %%xmm0, %0" : : "m"(global_buf1));
+ asm volatile ("vmovups %%xmm15, %0" : : "m"(global_buf1));
+
+ asm volatile ("vmovups %0, %%xmm0" : : "m"(*dyn_buf0));
+ asm volatile ("vmovupd %0, %%xmm15" : : "m"(*dyn_buf1));
+ asm volatile ("vmovupd %%xmm0, %0" : : "m"(*dyn_buf1));
+ asm volatile ("vmovups %%xmm15, %0" : : "m"(*dyn_buf1));
+
+ asm volatile ("vmovaps %0, %%xmm0" : : "m"(*dyn_buf0));
+ asm volatile ("vmovapd %0, %%xmm15" : : "m"(*dyn_buf1));
+ asm volatile ("vmovapd %%xmm0, %0" : : "m"(*dyn_buf1));
+ asm volatile ("vmovaps %%xmm15, %0" : : "m"(*dyn_buf1));
+
/* We have a return statement to deal with
   the epilogue in different compilers.  */
return 0; /* end vmov_test */
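A note on the three-operand vmovs[sd] forms exercised above: they merge
rather than move wholesale.  The destination takes its low element from the
first AT&T source operand and its remaining bits from the second, so from
the recorder's point of view only the destination register is clobbered.
A standalone check of that merge behavior (assuming an AVX-capable x86-64
host and a GCC-compatible compiler; this is illustration, not part of the
test file):

#include <stdio.h>

int
main (void)
{
  float lo = 1.5f;
  float hi[4] = { 9.0f, 10.0f, 11.0f, 12.0f };
  float out[4];

  asm volatile ("vmovss %1, %%xmm1\n\t"		      /* xmm1[31:0] = lo.  */
		"vmovups %2, %%xmm2\n\t"	      /* xmm2 = hi[0..3].  */
		"vmovss %%xmm1, %%xmm2, %%xmm3\n\t"   /* Merge into xmm3.  */
		"vmovups %%xmm3, %0"		      /* Write xmm3 out.  */
		: "=m" (out)
		: "m" (lo), "m" (hi)
		: "xmm1", "xmm2", "xmm3");

  /* Expect "1.5 10 11 12": only the low element came from lo.  */
  printf ("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
  return 0;
}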
diff --git a/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp b/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp
index cc920d3..45d8984 100644
--- a/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp
+++ b/gdb/testsuite/gdb.reverse/i386-avx-reverse.exp
@@ -145,6 +145,63 @@ global decimal
if {[record_full_function "vmov"] == true} {
# Now execute backwards, checking all instructions.
+
+ test_one_memory "vmovaps" "dyn_buf1" \
+ "0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28" true
+ test_one_memory "vmovapd" "dyn_buf1" \
+ "0x54, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x28" true
+ test_one_register "vmovapd" "xmm15" ".*" "dynamic buffer: "
+ test_one_register "vmovaps" "xmm0" ".*" "dynamic buffer: "
+
+ test_one_memory "vmovups" "dyn_buf1" \
+ "0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28" true
+ test_one_memory "vmovupd" "dyn_buf1" \
+ "0x54, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x28" true
+ test_one_register "vmovupd" "xmm15" ".*" "dynamic buffer: "
+ test_one_register "vmovups" "xmm0" ".*" "dynamic buffer: "
+
+ test_one_memory "vmovups" "global_buf1" \
+ "0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18"
+ test_one_memory "vmovupd" "global_buf1" \
+ "0x54, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x18"
+ test_one_register "vmovupd" "xmm15" ".*" "global buffer: "
+ test_one_register "vmovups" "xmm0" ".*" "global buffer: "
+
+ test_one_memory "vmovups" "buf1" \
+ "0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38"
+ test_one_memory "vmovupd" "buf1" \
+ "0x54, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x38"
+ test_one_register "vmovupd" "xmm15" "0xbff8000000000000" "local buffer: "
+ test_one_register "vmovups" "xmm0" "0xc004000000000000" "local buffer: "
+
+ gdb_test "rs 3" "vmovq %%xmm3.*"
+
+ test_one_register "vmovsd" "xmm1" "0xbff8000000000000" "from register: "
+ test_one_register "vmovsd" "xmm15" "0xc004000000000000" "from register: "
+ test_one_register "vmovsd" "xmm8" "0x40866666" "from register: "
+ test_one_register "vmovsd" "xmm2" "0x40866666" "from register: "
+
+ test_one_memory "vmovsd" "d1" "0xbff8000000000000"
+ test_one_memory "vmovsd" "d2" "0xc004000000000000"
+ test_one_register "vmovsd" "xmm15" "0x3fc00000"
+ test_one_register "vmovsd" "xmm1" "0x40866666"
+
+ # Reverse step over the line initializing the doubles.
+ gdb_test "rs" "double .*" "step over double initialization"
+
+ test_one_register "vmovss" "xmm1" "0x3fc00000" "from register: "
+ test_one_register "vmovss" "xmm15" "0x40866666" "from register: "
+ test_one_register "vmovss" "xmm8" "0" "from register: "
+ test_one_register "vmovss" "xmm2" "0" "from register: "
+
+ test_one_memory "vmovss" "f1" "0x3fc00000"
+ test_one_memory "vmovss" "f2" "0x40866666"
+ test_one_register "vmovss" "xmm15" "0x0"
+ test_one_register "vmovss" "xmm1" "0x0"
+
+ # Reverse step over the line initializing the floats.
+ gdb_test "rs" "float .*" "step over float initialization"
+
test_one_register "vmovdqa" "ymm0" \
"0x2f2e2d2c2b2a29282726252423222120, 0x2f2e2d2c2b2a29282726252423222120" \
"from register: "