Diffstat (limited to 'gdb/testsuite/gdb.reverse/i386-avx-reverse.c')
-rw-r--r--  gdb/testsuite/gdb.reverse/i386-avx-reverse.c | 359
1 file changed, 358 insertions(+), 1 deletion(-)
diff --git a/gdb/testsuite/gdb.reverse/i386-avx-reverse.c b/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
index a37b65a..a3d6427 100644
--- a/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
+++ b/gdb/testsuite/gdb.reverse/i386-avx-reverse.c
@@ -30,6 +30,18 @@ char global_buf1[] = {0, 0, 0, 0, 0, 0, 0, 0,
char *dyn_buf0;
char *dyn_buf1;
+ /* Zero memory regions again, so that future tests can update them
+ without worry. */
+void
+reset_buffers ()
+{
+ for (int i = 0; i < 32; i++)
+ {
+ global_buf1[i] = 0;
+ dyn_buf1[i] = 0;
+ }
+}
+
int
vmov_test ()
{
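The new reset_buffers helper matters because the record/reverse tests overwrite global_buf1 and dyn_buf1 and then expect reverse execution to restore the old (zero) contents; zeroing between test functions keeps that expected "before" state simple. As a rough self-check of the precondition, a hypothetical helper (not part of the patch) could look like:

/* Hypothetical sanity check, not part of the patch: confirm both buffers
   really are zero before the next test function runs.  */
static int
buffers_are_zero (void)
{
  for (int i = 0; i < 32; i++)
    if (global_buf1[i] != 0 || dyn_buf1[i] != 0)
      return 0;
  return 1;
}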
@@ -168,6 +180,22 @@ vmov_test ()
asm volatile ("vmovapd %%xmm0, %0" : : "m"(*dyn_buf1));
asm volatile ("vmovaps %%ymm15, %0" : : "m"(*dyn_buf1));
+ /* Testing vmov[hl|lh]ps and vmov[h|l]pd. */
+ asm volatile ("vmovhlps %xmm1, %xmm8, %xmm0");
+ asm volatile ("vmovhlps %xmm1, %xmm2, %xmm15");
+ asm volatile ("vmovlhps %xmm1, %xmm8, %xmm0");
+ asm volatile ("vmovlhps %xmm1, %xmm2, %xmm15");
+
+ asm volatile ("vmovhps %0, %%xmm1, %%xmm0" : : "m"(buf0));
+ asm volatile ("vmovhps %%xmm0, %0" : "=m" (buf1));
+ asm volatile ("vmovhpd %0, %%xmm1, %%xmm15" : : "m"(global_buf0));
+ asm volatile ("vmovhpd %%xmm15, %0" : "=m" (global_buf1));
+ asm volatile ("vmovlpd %0, %%xmm1, %%xmm15" : : "m"(*dyn_buf0));
+ asm volatile ("vmovlpd %%xmm15, %0" : "=m" (*dyn_buf1));
+
+ asm volatile ("vmovddup %xmm1, %xmm15");
+ asm volatile ("vmovddup %ymm2, %ymm0");
+
/* We have a return statement to deal with
epilogue in different compilers. */
return 0; /* end vmov_test */
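The vmovhps/vmovhpd/vmovlpd stores added above write only 8 bytes of the destination buffer (the high or low quadword of the xmm source), so that is the slice of old memory the recorder has to save. A standalone sketch of the store half (not part of the patch; helper name made up):

#include <stdint.h>

/* Illustration only: vmovhps with a memory destination stores just the
   upper 64 bits of the xmm source.  */
static uint64_t
store_high_quadword_of_xmm0 (void)
{
  uint64_t out;
  asm volatile ("vmovhps %%xmm0, %0" : "=m" (out));
  return out;
}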
@@ -245,7 +273,7 @@ vpunpck_test ()
return 0; /* end vpunpck_test */
}
-/* Test if we can record vpbroadcast instructions. */
+/* Test if we can record vpbroadcast and vbroadcast instructions. */
int
vpbroadcast_test ()
{
@@ -268,6 +296,14 @@ vpbroadcast_test ()
asm volatile ("vpbroadcastq %xmm1, %ymm0");
asm volatile ("vpbroadcastq %xmm1, %ymm15");
+ asm volatile ("vbroadcastss %xmm1, %xmm0");
+ asm volatile ("vbroadcastss %xmm1, %ymm15");
+ asm volatile ("vbroadcastss %0, %%ymm0" : : "m" (global_buf0));
+ asm volatile ("vbroadcastss %0, %%xmm15": : "m" (dyn_buf0));
+ asm volatile ("vbroadcastsd %xmm1, %ymm0");
+ asm volatile ("vbroadcastsd %0, %%ymm15": : "m" (global_buf0));
+ asm volatile ("vbroadcastf128 %0, %%ymm0" : : "m" (dyn_buf0));
+
/* We have a return statement to deal with
epilogue in different compilers. */
return 0; /* end vpbroadcast_test */
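The vbroadcast forms added above replicate a single element across every lane of the destination register and leave their memory source untouched, so only the old destination register needs recording. A standalone sketch of the effect (not part of the patch; names hypothetical):

/* Illustration only: broadcast one float from memory into all eight lanes
   of ymm0, then copy ymm0 out so the result can be inspected.  */
static void
broadcast_float (const float *src, float dst[8])
{
  asm volatile ("vbroadcastss %1, %%ymm0\n\t"
                "vmovups %%ymm0, %0"
                : "=m" (*(float (*)[8]) dst)
                : "m" (*src)
                : "xmm0");
}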
@@ -372,6 +408,7 @@ arith_test ()
/* Using GDB, load these values onto registers for testing.
ymm0.v8_float = {0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5}
ymm1.v8_float = {0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5}
+ ymm2.v2_int128 = {0x0, 0x0}
ymm15.v2_int128 = {0x0, 0x0}
this way it's easy to confirm we're undoing things correctly. */
asm volatile ("vaddps %xmm0, %xmm1, %xmm15");
@@ -416,9 +453,318 @@ arith_test ()
asm volatile ("vmaxss %xmm0, %xmm1, %xmm15");
asm volatile ("vmaxsd %xmm0, %xmm1, %xmm15");
+ /* Some sanity checks for other arithmetic instructions. */
+ asm volatile ("vpaddb %xmm0, %xmm1, %xmm2");
+ asm volatile ("vpaddw %xmm0, %xmm1, %xmm15");
+ asm volatile ("vpaddd %ymm0, %ymm1, %ymm2");
+ asm volatile ("vpaddq %ymm0, %ymm1, %ymm15");
+
+ asm volatile ("vpmullw %xmm0, %xmm1, %xmm2");
+ asm volatile ("vpmulld %xmm0, %xmm1, %xmm15");
+ asm volatile ("vpmulhw %ymm0, %ymm1, %ymm2");
+ asm volatile ("vpmulhuw %ymm0, %ymm1, %ymm15");
+ asm volatile ("vpmuludq %ymm0, %ymm1, %ymm15");
+
+ asm volatile ("vxorps %xmm0, %xmm1, %xmm2");
+ asm volatile ("vxorpd %ymm0, %ymm1, %ymm2");
+ asm volatile ("vpand %xmm0, %xmm1, %xmm15");
+ asm volatile ("vpandn %ymm0, %ymm1, %ymm15");
+
+ asm volatile ("vpsadbw %xmm0, %xmm1, %xmm2");
+ asm volatile ("vpsadbw %ymm0, %ymm1, %ymm15");
+
return 0; /* end arith_test */
}
+int
+vaddsubpd_test ()
+{
+ /* start vaddsubpd_test */
+ /* YMM test. */
+ asm volatile ("vaddsubpd %ymm15,%ymm1,%ymm0");
+ asm volatile ("vaddsubpd %ymm0,%ymm1,%ymm15");
+ asm volatile ("vaddsubpd %ymm2,%ymm3,%ymm4");
+
+ /* XMM test. */
+ asm volatile ("vaddsubpd %xmm15,%xmm1,%xmm2");
+ asm volatile ("vaddsubpd %xmm0,%xmm1,%xmm10");
+ return 0; /* end vaddsubpd_test */
+}
+
+int
+vaddsubps_test ()
+{
+ /* start vaddsubps_test */
+ /* YMM test. */
+ asm volatile ("vaddsubps %ymm15,%ymm1,%ymm2");
+ asm volatile ("vaddsubps %ymm0,%ymm1,%ymm10");
+ asm volatile ("vaddsubps %ymm2,%ymm3,%ymm4");
+
+ /* XMM test. */
+ asm volatile ("vaddsubps %xmm0,%xmm1,%xmm15");
+ asm volatile ("vaddsubps %xmm15,%xmm1,%xmm0");
+ return 0; /* end vaddsubps_test */
+}
+
+
+/* Test if we can record shift instructions. */
+int
+shift_test ()
+{
+ /* start shift_test. */
+ /* Using GDB, load these values onto registers for testing.
+ ymm0.v2_int128 = {0, 0}
+ ymm1.v16_int16 = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}
+ xmm2.uint128 = 1
+ ymm15.v2_int128 = {0x0, 0x0}
+ this way it's easy to confirm we're undoing things correctly. */
+
+ asm volatile ("vpsllw $1, %xmm1, %xmm0");
+ asm volatile ("vpsllw %xmm2, %ymm1, %ymm0");
+ asm volatile ("vpslld $3, %ymm1, %ymm15");
+ asm volatile ("vpslld %xmm2, %xmm1, %xmm15");
+ asm volatile ("vpsllq $5, %xmm1, %xmm15");
+ asm volatile ("vpsllq %xmm2, %ymm1, %ymm15");
+
+ asm volatile ("vpsraw $1, %xmm1, %xmm0");
+ asm volatile ("vpsraw %xmm2, %ymm1, %ymm0");
+ asm volatile ("vpsrad $3, %ymm1, %ymm15");
+ asm volatile ("vpsrad %xmm2, %xmm1, %xmm15");
+
+ asm volatile ("vpsrlw $1, %xmm1, %xmm0");
+ asm volatile ("vpsrlw %xmm2, %ymm1, %ymm0");
+ asm volatile ("vpsrld $3, %ymm1, %ymm15");
+ asm volatile ("vpsrld %xmm2, %xmm1, %xmm15");
+ asm volatile ("vpsrlq $5, %xmm1, %xmm15");
+ asm volatile ("vpsrlq %xmm2, %ymm1, %ymm15");
+
+ /* The dq version is treated separately in the manual, so
+ we test it separately just to be sure. */
+ asm volatile ("vpslldq $1, %xmm1, %xmm0");
+ asm volatile ("vpslldq $2, %ymm1, %ymm0");
+ asm volatile ("vpslldq $3, %xmm1, %xmm15");
+ asm volatile ("vpslldq $4, %ymm1, %ymm15");
+
+ asm volatile ("vpsrldq $1, %xmm1, %xmm0");
+ asm volatile ("vpsrldq $2, %ymm1, %ymm0");
+ asm volatile ("vpsrldq $3, %xmm1, %xmm15");
+ asm volatile ("vpsrldq $4, %ymm1, %ymm15");
+
+ return 0; /* end shift_test */
+}
+
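One detail behind checking ymm0 and ymm15 as full 256-bit values in these tests: any VEX-encoded instruction with an xmm destination also zeroes bits 255:128 of the matching ymm register, so the whole register changes even for the xmm forms. A standalone sketch of that behaviour (not part of the patch; helper name made up):

#include <stdint.h>

/* Illustration only: after a VEX shift with an xmm destination, the upper
   128 bits of the corresponding ymm register read back as zero.  */
static void
xmm_shift_zeroes_upper (uint64_t out[4])
{
  asm volatile ("vpsllw $1, %%xmm1, %%xmm0\n\t"
                "vmovups %%ymm0, %0"
                : "=m" (*(uint64_t (*)[4]) out)
                :
                : "xmm0");
  /* out[2] and out[3] are guaranteed to be zero afterwards.  */
}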
+int
+shuffle_test ()
+{
+ /* start shuffle_test. */
+ /* Using GDB, load these values onto registers for testing.
+ ymm0.v2_int128 = {0, 0}
+ ymm1.v16_int16 = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}
+ ymm2.v16_int16 = {17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}
+ ymm15.v2_int128 = {0x0, 0x0}
+ this way it's easy to confirm we're undoing things correctly. */
+
+ asm volatile ("vpshufb %xmm1, %xmm2, %xmm0");
+ asm volatile ("vpshufb %ymm1, %ymm2, %ymm15");
+ asm volatile ("vpshufd $1, %ymm2, %ymm0");
+ asm volatile ("vpshufd $2, %xmm2, %xmm15");
+
+ asm volatile ("vpshufhw $3, %xmm2, %xmm0");
+ asm volatile ("vpshufhw $4, %ymm2, %ymm15");
+ asm volatile ("vpshuflw $5, %ymm2, %ymm0");
+ asm volatile ("vpshuflw $6, %xmm2, %xmm15");
+
+ asm volatile ("vshufps $1, %xmm1, %xmm2, %xmm0");
+ asm volatile ("vshufps $2, %ymm1, %ymm2, %ymm15");
+ asm volatile ("vshufpd $4, %ymm1, %ymm2, %ymm0");
+ asm volatile ("vshufpd $8, %xmm1, %xmm2, %xmm15");
+
+ return 0; /* end shuffle_test */
+}
+
+int
+permute_test ()
+{
+ /* start permute_test. */
+ /* Using GDB, load these values onto registers for testing.
+ ymm0.v2_int128 = {0, 0}
+ ymm1.v16_int16 = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}
+ ymm2.v16_int16 = {17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}
+ ymm15.v2_int128 = {0x0, 0x0}
+ eax = 0
+ this way it's easy to confirm we're undoing things correctly. */
+ asm volatile ("vperm2f128 $1, %ymm1, %ymm2, %ymm0");
+ asm volatile ("vperm2f128 $0, %ymm1, %ymm2, %ymm15");
+ asm volatile ("vperm2i128 $1, %ymm2, %ymm1, %ymm0");
+ asm volatile ("vperm2i128 $0, %ymm2, %ymm1, %ymm15");
+
+ asm volatile ("vpermd %ymm1, %ymm2, %ymm0");
+ asm volatile ("vpermd %ymm1, %ymm2, %ymm15");
+ asm volatile ("vpermq $1, %ymm1, %ymm0");
+ asm volatile ("vpermq $0, %ymm2, %ymm15");
+
+ asm volatile ("vpermilpd %ymm1, %ymm2, %ymm0");
+ asm volatile ("vpermilpd %xmm1, %xmm2, %xmm15");
+ asm volatile ("vpermilpd $1, %ymm2, %ymm15");
+ asm volatile ("vpermilpd $0, %xmm2, %xmm0");
+ asm volatile ("vpermilps %ymm1, %ymm2, %ymm0");
+ asm volatile ("vpermilps %xmm1, %xmm2, %xmm15");
+ asm volatile ("vpermilps $1, %ymm2, %ymm15");
+ asm volatile ("vpermilps $0, %xmm2, %xmm0");
+
+ asm volatile ("vpermpd $0, %ymm1, %ymm15");
+ asm volatile ("vpermpd $0, %ymm2, %ymm0");
+ asm volatile ("vpermps %ymm1, %ymm2, %ymm0");
+ asm volatile ("vpermps %ymm1, %ymm2, %ymm15");
+
+ return 0; /* end permute_test */
+}
+
+int
+extract_insert_test ()
+{
+ /* start extract_insert_test. */
+ /* Using GDB, load these values onto registers for testing.
+ ymm0.v2_int128 = {0, 0}
+ ymm1.v16_int16 = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}
+ xmm2.uint128 = 0xcafe
+ ymm15.v2_int128 = {0x0, 0x0}
+ eax = 0
+ this way it's easy to confirm we're undoing things correctly. */
+
+ asm volatile ("vinserti128 $1, %xmm2, %ymm1, %ymm0");
+ asm volatile ("vinsertf128 $0, %xmm2, %ymm1, %ymm15");
+ asm volatile ("vextracti128 $1, %ymm1, %xmm0");
+ asm volatile ("vextractf128 $0, %ymm1, %xmm15");
+ asm volatile ("vinsertps $16, %xmm2, %xmm1, %xmm0");
+ asm volatile ("vextractps $0, %xmm2, %rax");
+
+ asm volatile ("vpextrb $5, %xmm1, %rax");
+ asm volatile ("vpextrb $4, %%xmm1, %0" : "=m" (global_buf1));
+ asm volatile ("vpextrd $3, %xmm1, %eax");
+ asm volatile ("vpextrd $2, %%xmm1, %0" : "=m" (global_buf1));
+ asm volatile ("vpextrq $1, %xmm1, %rax");
+ asm volatile ("vpextrq $0, %%xmm1, %0" : "=m" (global_buf1));
+
+ asm volatile ("vpinsrb $3, %rax, %xmm2, %xmm0");
+ asm volatile ("vpinsrw $2, %eax, %xmm2, %xmm15");
+ asm volatile ("vpinsrd $1, %eax, %xmm2, %xmm0");
+ asm volatile ("vpinsrq $0, %rax, %xmm2, %xmm15");
+
+ /* vpextrw has completely different mechanics from the other vpextr
+ instructions, so we keep it separate for ease of testing later. */
+ asm volatile ("vpextrw $1, %xmm1, %eax");
+ asm volatile ("vpextrw $1, %%xmm1, %0" : "=m" (global_buf1));
+
+ return 0; /* end extract_insert_test */
+}
+
+int
+blend_test ()
+{
+ /* start blend_test. */
+ /* Using GDB, load these values onto registers for testing.
+ ymm0.v2_int128 = {0, 0}
+ ymm1.v16_int16 = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}
+ ymm2.v16_int16 = {17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}
+ ymm15.v2_int128 = {0x0, 0x0}
+ this way it's easy to confirm we're undoing things correctly. */
+
+ asm volatile ("vblendps $5, %xmm1, %xmm2, %xmm0");
+ asm volatile ("vblendpd $10, %ymm1, %ymm2, %ymm15");
+ asm volatile ("vblendvps %ymm15, %ymm1, %ymm2, %ymm0");
+ asm volatile ("vblendvpd %xmm0, %xmm1, %xmm2, %xmm15");
+
+ asm volatile ("vpblendw $94, %ymm1, %ymm2, %ymm15");
+ asm volatile ("vpblendw $47, %xmm1, %xmm2, %xmm0");
+ asm volatile ("vpblendd $22, %ymm1, %ymm2, %ymm0");
+ asm volatile ("vpblendd $11, %xmm1, %xmm2, %xmm15");
+ asm volatile ("vpblendvb %xmm0, %xmm1, %xmm2, %xmm15");
+ asm volatile ("vpblendvb %ymm0, %ymm1, %ymm2, %ymm0");
+
+ return 0; /* end blend_test */
+}
+
+int
+compare_test ()
+{
+ /* start compare_test. */
+ /* Using GDB, load these values onto registers for testing.
+ xmm0.v4_float = {0, 1.5, 2, 0}
+ xmm1.v4_float = {0, 1, 2.5, -1}
+ xmm15.v4_float = {-1, -2, 10, 100}
+ eflags = 2
+ eflags can't be set to certain values; if we set it to 0, it'll
+ be reset to 2, so we set it to 2 directly to make the results less
+ confusing.
+ this way it's easy to confirm we're undoing things correctly. */
+
+ asm volatile ("vcomisd %xmm0, %xmm1");
+ asm volatile ("vcomiss %xmm15, %xmm1");
+ asm volatile ("vucomiss %xmm1, %xmm15");
+ asm volatile ("vucomisd %xmm15, %xmm0");
+
+ return 0; /* end compare_test */
+}
+
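vcomis*/vucomis* modify nothing but eflags (they set ZF, PF and CF and clear OF, SF and AF), which is why the comment above pins eflags to a known value before recording. A standalone way to observe that (sketch, not part of the patch):

#include <stdint.h>

/* Illustration only: run vcomisd and read back rflags, the only state the
   instruction writes.  */
static uint64_t
flags_after_vcomisd (void)
{
  uint64_t flags;
  asm volatile ("vcomisd %%xmm0, %%xmm1\n\t"
                "pushfq\n\t"
                "popq %0"
                : "=r" (flags)
                :
                : "cc");
  return flags;
}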
+int
+pack_test ()
+{
+ /* start pack_test. */
+ /* Using GDB, load these values onto registers for testing.
+ xmm0.v4_float = {0, 1.5, 2, 0}
+ xmm1.v4_float = {0, 1, 2.5, -1}
+ xmm2.v4_float = {0, 1, 2.5, -1}
+ xmm15.v4_float = {-1, -2, 10, 100}
+ this way it's easy to confirm we're undoing things correctly. */
+
+ asm volatile ("vpacksswb %xmm1, %xmm2, %xmm0");
+ asm volatile ("vpacksswb %ymm1, %ymm2, %ymm15");
+ asm volatile ("vpackssdw %xmm1, %xmm2, %xmm15");
+ asm volatile ("vpackssdw %ymm1, %ymm2, %ymm0");
+ asm volatile ("vpackuswb %xmm1, %xmm2, %xmm0");
+ asm volatile ("vpackuswb %ymm1, %ymm2, %ymm15");
+ asm volatile ("vpackusdw %xmm1, %xmm2, %xmm15");
+ asm volatile ("vpackusdw %ymm1, %ymm2, %ymm0");
+
+ return 0; /* end pack_test */
+}
+
+int
+convert_test ()
+{
+ /* start convert_test. */
+ /* Using GDB, load these values onto registers for testing.
+ xmm0.v2_int128 = {0, 0}
+ xmm1.v4_float = {0, 1, 2.5, 10}
+ xmm15.v2_int128 = {0, 0}
+ ecx = -1
+ ebx = 0
+ this way it's easy to confirm we're undoing things correctly. */
+
+ asm volatile ("vcvtdq2ps %xmm1, %xmm0");
+ asm volatile ("vcvtdq2pd %xmm1, %xmm15");
+
+ asm volatile ("vcvtps2dq %xmm1, %xmm15");
+ asm volatile ("vcvtps2pd %xmm1, %xmm0");
+ asm volatile ("vcvtpd2ps %xmm1, %xmm15");
+ asm volatile ("vcvtpd2dq %xmm1, %xmm0");
+
+ asm volatile ("vcvtsd2si %xmm1, %rbx");
+ asm volatile ("vcvtsd2ss %xmm0, %xmm1, %xmm15");
+ asm volatile ("vcvtsi2sd %rcx, %xmm1, %xmm0");
+ asm volatile ("vcvtsi2ss %rcx, %xmm1, %xmm15");
+ asm volatile ("vcvtss2sd %xmm15, %xmm1, %xmm0");
+ asm volatile ("vcvtss2si %xmm1, %rbx");
+
+ asm volatile ("vcvttpd2dq %xmm1, %xmm0");
+ asm volatile ("vcvttps2dq %xmm1, %xmm15");
+ asm volatile ("vcvttsd2si %xmm0, %rbx");
+ asm volatile ("vcvttss2si %xmm1, %ecx");
+
+ return 0; /* end convert_test */
+}
+
/* This include is used to allocate the dynamic buffer and have
the pointers aligned to a 32-bit boundary, so we can test instructions
that require aligned memory. */
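The real allocation comes from precise-aligned-alloc.c; the point is that the aligned move forms used earlier (vmovaps/vmovapd) fault unless the address is 16-byte aligned for xmm and 32-byte aligned for ymm operands. A rough sketch of the same idea with the standard C11 allocator (not what the testsuite actually uses):

#include <stdlib.h>
#include <string.h>

/* Illustration only: hand back a zeroed buffer with the 32-byte alignment
   that vmovaps/vmovapd need for ymm accesses.  */
static char *
alloc_avx_buffer (size_t size)
{
  /* aligned_alloc wants the size rounded up to a multiple of the alignment.  */
  char *buf = aligned_alloc (32, (size + 31) & ~(size_t) 31);
  if (buf != NULL)
    memset (buf, 0, size);
  return buf;
}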
@@ -442,6 +788,7 @@ main ()
asm volatile ("vmovq %0, %%xmm15": : "m" (global_buf1));
vmov_test ();
+ reset_buffers ();
vpunpck_test ();
vpbroadcast_test ();
vzeroupper_test ();
@@ -449,5 +796,15 @@ main ()
vpcmpeq_test ();
vpmovmskb_test ();
arith_test ();
+ vaddsubpd_test ();
+ vaddsubps_test ();
+ shift_test ();
+ shuffle_test ();
+ permute_test ();
+ extract_insert_test ();
+ blend_test ();
+ compare_test ();
+ pack_test ();
+ convert_test ();
return 0; /* end of main */
}