aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAlexandre Oliva <oliva@adacore.com>2023-03-03 15:59:17 -0300
committerAlexandre Oliva <oliva@gnu.org>2023-03-03 15:59:17 -0300
commitcfa118fc089e38a94ec60ccf5b667aea015e5f60 (patch)
treeaf533015753507ba0afec68658389afd773627bf
parent220008eafaaed7433b1c18e394279391e885a138 (diff)
downloadgcc-cfa118fc089e38a94ec60ccf5b667aea015e5f60.zip
gcc-cfa118fc089e38a94ec60ccf5b667aea015e5f60.tar.gz
gcc-cfa118fc089e38a94ec60ccf5b667aea015e5f60.tar.bz2
[arm] complete vmsr/vmrs blank and case adjustments
Back in September last year, some of the vmsr and vmrs patterns had an
extraneous blank removed, and the case of register names lowered, but
another instance remained, and so did a testcase.

for gcc/ChangeLog

	* config/arm/vfp.md (*thumb2_movsi_vfp): Drop blank after tab
	after vmsr and vmrs, and lower the case of P0.

for gcc/testsuite/ChangeLog

	* gcc.target/arm/acle/cde-mve-full-assembly.c: Drop blank after
	tab after vmsr, and lower the case of P0.
-rw-r--r--gcc/config/arm/vfp.md4
-rw-r--r--gcc/testsuite/gcc.target/arm/acle/cde-mve-full-assembly.c264
2 files changed, 134 insertions, 134 deletions
diff --git a/gcc/config/arm/vfp.md b/gcc/config/arm/vfp.md
index f34f35e..60e7ba3 100644
--- a/gcc/config/arm/vfp.md
+++ b/gcc/config/arm/vfp.md
@@ -312,9 +312,9 @@
case 12: case 13:
return output_move_vfp (operands);
case 14:
- return \"vmsr\\t P0, %1\";
+ return \"vmsr\\tp0, %1\";
case 15:
- return \"vmrs\\t %0, P0\";
+ return \"vmrs\\t%0, p0\";
case 16:
return \"mcr\\tp10, 7, %1, cr1, cr0, 0\\t @SET_FPSCR\";
case 17:
diff --git a/gcc/testsuite/gcc.target/arm/acle/cde-mve-full-assembly.c b/gcc/testsuite/gcc.target/arm/acle/cde-mve-full-assembly.c
index d025c33..72f3301 100644
--- a/gcc/testsuite/gcc.target/arm/acle/cde-mve-full-assembly.c
+++ b/gcc/testsuite/gcc.target/arm/acle/cde-mve-full-assembly.c
@@ -534,80 +534,80 @@
contain back references). */
/*
** test_cde_vcx1q_mfloat16x8_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1t p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1q_mfloat32x4_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1t p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1q_muint8x16_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1t p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1q_muint16x8_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1t p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1q_muint32x4_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1t p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1q_muint64x2_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1t p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1q_mint8x16_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1t p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1q_mint16x8_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1t p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1q_mint32x4_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1t p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1q_mint64x2_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1t p0, q0, #32
** bx lr
@@ -616,80 +616,80 @@
/*
** test_cde_vcx1qa_mfloat16x8_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1at p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1qa_mfloat32x4_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1at p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1qa_muint8x16_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1at p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1qa_muint16x8_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1at p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1qa_muint32x4_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1at p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1qa_muint64x2_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1at p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1qa_mint8x16_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1at p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1qa_mint16x8_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1at p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1qa_mint32x4_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1at p0, q0, #32
** bx lr
*/
/*
** test_cde_vcx1qa_mint64x2_tintint:
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
-** (?:vmov\.i32 q0, #0 @ v16qi|vmsr P0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
+** (?:vmov\.i32 q0, #0 @ v16qi|vmsr p0, r2 @ movhi)
** vpst
** vcx1at p0, q0, #32
** bx lr
@@ -698,8 +698,8 @@
/*
** test_cde_vcx2q_mfloat16x8_tuint16x8_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2t p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -707,8 +707,8 @@
*/
/*
** test_cde_vcx2q_mfloat16x8_tfloat32x4_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2t p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -716,8 +716,8 @@
*/
/*
** test_cde_vcx2q_mfloat32x4_tuint8x16_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2t p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -725,8 +725,8 @@
*/
/*
** test_cde_vcx2q_mint64x2_tuint8x16_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2t p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -734,8 +734,8 @@
*/
/*
** test_cde_vcx2q_mint8x16_tuint8x16_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2t p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -743,8 +743,8 @@
*/
/*
** test_cde_vcx2q_muint16x8_tuint8x16_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2t p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -752,8 +752,8 @@
*/
/*
** test_cde_vcx2q_muint8x16_tint64x2_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2t p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -761,8 +761,8 @@
*/
/*
** test_cde_vcx2q_muint8x16_tint8x16_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2t p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -770,8 +770,8 @@
*/
/*
** test_cde_vcx2q_muint8x16_tuint16x8_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2t p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -779,8 +779,8 @@
*/
/*
** test_cde_vcx2q_muint8x16_tuint8x16_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2t p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -790,8 +790,8 @@
/*
** test_cde_vcx2qa_mfloat16x8_tuint16x8_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2at p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -799,8 +799,8 @@
*/
/*
** test_cde_vcx2qa_mfloat16x8_tfloat32x4_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2at p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -808,8 +808,8 @@
*/
/*
** test_cde_vcx2qa_mfloat32x4_tuint8x16_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2at p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -817,8 +817,8 @@
*/
/*
** test_cde_vcx2qa_mint64x2_tuint8x16_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2at p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -826,8 +826,8 @@
*/
/*
** test_cde_vcx2qa_mint8x16_tuint8x16_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2at p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -835,8 +835,8 @@
*/
/*
** test_cde_vcx2qa_muint16x8_tuint8x16_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2at p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -844,8 +844,8 @@
*/
/*
** test_cde_vcx2qa_muint8x16_tint64x2_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2at p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -853,8 +853,8 @@
*/
/*
** test_cde_vcx2qa_muint8x16_tint8x16_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2at p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -862,8 +862,8 @@
*/
/*
** test_cde_vcx2qa_muint8x16_tuint16x8_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2at p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -871,8 +871,8 @@
*/
/*
** test_cde_vcx2qa_muint8x16_tuint8x16_tint:
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
-** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr P0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
+** (?:vmov\.i32 q[1-7], #0 @ v16qi|vmsr p0, r1 @ movhi)
** vpst
** vcx2at p0, (q[1-7]), q0, #32
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -882,8 +882,8 @@
/*
** test_cde_vcx3q_muint8x16_tuint8x16_tuint8x16_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3t p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -891,8 +891,8 @@
*/
/*
** test_cde_vcx3q_mfloat16x8_tfloat16x8_tfloat16x8_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3t p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -900,8 +900,8 @@
*/
/*
** test_cde_vcx3q_mfloat32x4_tuint64x2_tfloat16x8_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3t p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -909,8 +909,8 @@
*/
/*
** test_cde_vcx3q_muint16x8_tuint8x16_tuint8x16_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3t p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -918,8 +918,8 @@
*/
/*
** test_cde_vcx3q_muint8x16_tuint16x8_tuint8x16_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3t p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -927,8 +927,8 @@
*/
/*
** test_cde_vcx3q_muint8x16_tuint8x16_tuint16x8_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3t p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -936,8 +936,8 @@
*/
/*
** test_cde_vcx3q_mint8x16_tuint8x16_tuint8x16_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3t p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -945,8 +945,8 @@
*/
/*
** test_cde_vcx3q_muint8x16_tint8x16_tuint8x16_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3t p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -954,8 +954,8 @@
*/
/*
** test_cde_vcx3q_muint8x16_tuint8x16_tint8x16_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3t p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -963,8 +963,8 @@
*/
/*
** test_cde_vcx3q_mint64x2_tuint8x16_tuint8x16_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3t p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -972,8 +972,8 @@
*/
/*
** test_cde_vcx3q_muint8x16_tint64x2_tuint8x16_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3t p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -981,8 +981,8 @@
*/
/*
** test_cde_vcx3q_muint8x16_tuint8x16_tint64x2_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3t p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -990,8 +990,8 @@
*/
/*
** test_cde_vcx3q_muint8x16_tint64x2_tint64x2_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3t p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -1001,8 +1001,8 @@
/*
** test_cde_vcx3qa_muint8x16_tuint8x16_tuint8x16_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3at p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -1010,8 +1010,8 @@
*/
/*
** test_cde_vcx3qa_mfloat16x8_tfloat16x8_tfloat16x8_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3at p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -1019,8 +1019,8 @@
*/
/*
** test_cde_vcx3qa_mfloat32x4_tuint64x2_tfloat16x8_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3at p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -1028,8 +1028,8 @@
*/
/*
** test_cde_vcx3qa_muint16x8_tuint8x16_tuint8x16_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3at p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -1037,8 +1037,8 @@
*/
/*
** test_cde_vcx3qa_muint8x16_tuint16x8_tuint8x16_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3at p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -1046,8 +1046,8 @@
*/
/*
** test_cde_vcx3qa_muint8x16_tuint8x16_tuint16x8_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3at p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -1055,8 +1055,8 @@
*/
/*
** test_cde_vcx3qa_mint8x16_tuint8x16_tuint8x16_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3at p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -1064,8 +1064,8 @@
*/
/*
** test_cde_vcx3qa_muint8x16_tint8x16_tuint8x16_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3at p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -1073,8 +1073,8 @@
*/
/*
** test_cde_vcx3qa_muint8x16_tuint8x16_tint8x16_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3at p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -1082,8 +1082,8 @@
*/
/*
** test_cde_vcx3qa_mint64x2_tuint8x16_tuint8x16_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3at p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -1091,8 +1091,8 @@
*/
/*
** test_cde_vcx3qa_muint8x16_tint64x2_tuint8x16_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3at p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -1100,8 +1100,8 @@
*/
/*
** test_cde_vcx3qa_muint8x16_tuint8x16_tint64x2_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3at p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?
@@ -1109,8 +1109,8 @@
*/
/*
** test_cde_vcx3qa_muint8x16_tint64x2_tint64x2_t:
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
-** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr P0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
+** (?:vmov\.i32 q[2-7], #0 @ v16qi|vmsr p0, r0 @ movhi)
** vpst
** vcx3at p0, (q[2-7]), q0, q1, #15
** vmov q0, \1([[:space:]]+@ [^\n]*)?