author     Srinath Parvathaneni <srinath.parvathaneni@arm.com>  2020-03-17 15:32:36 +0000
committer  Kyrylo Tkachov <kyrylo.tkachov@arm.com>  2020-03-17 15:32:36 +0000
commit     33203b4c27d09b22b6cb4cc90970867eba2cda3f (patch)
tree       7301d1a1cc1118de2689cfcff42b705aadb8858e
parent     d71dba7b611f5e8404aa1b4361d319e856665a4a (diff)
[ARM][GCC][4/2x]: MVE intrinsics with binary operands.
This patch supports the following MVE ACLE intrinsics with binary operands:

vsubq_u8, vsubq_n_u8, vrmulhq_u8, vrhaddq_u8, vqsubq_u8, vqsubq_n_u8, vqaddq_u8, vqaddq_n_u8,
vorrq_u8, vornq_u8, vmulq_u8, vmulq_n_u8, vmulltq_int_u8, vmullbq_int_u8, vmulhq_u8, vmladavq_u8,
vminvq_u8, vminq_u8, vmaxvq_u8, vmaxq_u8, vhsubq_u8, vhsubq_n_u8, vhaddq_u8, vhaddq_n_u8,
veorq_u8, vcmpneq_n_u8, vcmphiq_u8, vcmphiq_n_u8, vcmpeqq_u8, vcmpeqq_n_u8, vcmpcsq_u8,
vcmpcsq_n_u8, vcaddq_rot90_u8, vcaddq_rot270_u8, vbicq_u8, vandq_u8, vaddvq_p_u8, vaddvaq_u8,
vaddq_n_u8, vabdq_u8, vshlq_r_u8, vrshlq_u8, vrshlq_n_u8, vqshlq_u8, vqshlq_r_u8, vqrshlq_u8,
vqrshlq_n_u8, vminavq_s8, vminaq_s8, vmaxavq_s8, vmaxaq_s8, vbrsrq_n_u8, vshlq_n_u8, vrshrq_n_u8,
vqshlq_n_u8, vcmpneq_n_s8, vcmpltq_s8, vcmpltq_n_s8, vcmpleq_s8, vcmpleq_n_s8, vcmpgtq_s8,
vcmpgtq_n_s8, vcmpgeq_s8, vcmpgeq_n_s8, vcmpeqq_s8, vcmpeqq_n_s8, vqshluq_n_s8, vaddvq_p_s8,
vsubq_s8, vsubq_n_s8, vshlq_r_s8, vrshlq_s8, vrshlq_n_s8, vrmulhq_s8, vrhaddq_s8, vqsubq_s8,
vqsubq_n_s8, vqshlq_s8, vqshlq_r_s8, vqrshlq_s8, vqrshlq_n_s8, vqrdmulhq_s8, vqrdmulhq_n_s8,
vqdmulhq_s8, vqdmulhq_n_s8, vqaddq_s8, vqaddq_n_s8, vorrq_s8, vornq_s8, vmulq_s8, vmulq_n_s8,
vmulltq_int_s8, vmullbq_int_s8, vmulhq_s8, vmlsdavxq_s8, vmlsdavq_s8, vmladavxq_s8, vmladavq_s8,
vminvq_s8, vminq_s8, vmaxvq_s8, vmaxq_s8, vhsubq_s8, vhsubq_n_s8, vhcaddq_rot90_s8,
vhcaddq_rot270_s8, vhaddq_s8, vhaddq_n_s8, veorq_s8, vcaddq_rot90_s8, vcaddq_rot270_s8,
vbrsrq_n_s8, vbicq_s8, vandq_s8, vaddvaq_s8, vaddq_n_s8, vabdq_s8, vshlq_n_s8, vrshrq_n_s8,
vqshlq_n_s8, vsubq_u16, vsubq_n_u16, vrmulhq_u16, vrhaddq_u16, vqsubq_u16, vqsubq_n_u16,
vqaddq_u16, vqaddq_n_u16, vorrq_u16, vornq_u16, vmulq_u16, vmulq_n_u16, vmulltq_int_u16,
vmullbq_int_u16, vmulhq_u16, vmladavq_u16, vminvq_u16, vminq_u16, vmaxvq_u16, vmaxq_u16,
vhsubq_u16, vhsubq_n_u16, vhaddq_u16, vhaddq_n_u16, veorq_u16, vcmpneq_n_u16, vcmphiq_u16,
vcmphiq_n_u16, vcmpeqq_u16, vcmpeqq_n_u16, vcmpcsq_u16, vcmpcsq_n_u16, vcaddq_rot90_u16,
vcaddq_rot270_u16, vbicq_u16, vandq_u16, vaddvq_p_u16, vaddvaq_u16, vaddq_n_u16, vabdq_u16,
vshlq_r_u16, vrshlq_u16, vrshlq_n_u16, vqshlq_u16, vqshlq_r_u16, vqrshlq_u16, vqrshlq_n_u16,
vminavq_s16, vminaq_s16, vmaxavq_s16, vmaxaq_s16, vbrsrq_n_u16, vshlq_n_u16, vrshrq_n_u16,
vqshlq_n_u16, vcmpneq_n_s16, vcmpltq_s16, vcmpltq_n_s16, vcmpleq_s16, vcmpleq_n_s16, vcmpgtq_s16,
vcmpgtq_n_s16, vcmpgeq_s16, vcmpgeq_n_s16, vcmpeqq_s16, vcmpeqq_n_s16, vqshluq_n_s16,
vaddvq_p_s16, vsubq_s16, vsubq_n_s16, vshlq_r_s16, vrshlq_s16, vrshlq_n_s16, vrmulhq_s16,
vrhaddq_s16, vqsubq_s16, vqsubq_n_s16, vqshlq_s16, vqshlq_r_s16, vqrshlq_s16, vqrshlq_n_s16,
vqrdmulhq_s16, vqrdmulhq_n_s16, vqdmulhq_s16, vqdmulhq_n_s16, vqaddq_s16, vqaddq_n_s16,
vorrq_s16, vornq_s16, vmulq_s16, vmulq_n_s16, vmulltq_int_s16, vmullbq_int_s16, vmulhq_s16,
vmlsdavxq_s16, vmlsdavq_s16, vmladavxq_s16, vmladavq_s16, vminvq_s16, vminq_s16, vmaxvq_s16,
vmaxq_s16, vhsubq_s16, vhsubq_n_s16, vhcaddq_rot90_s16, vhcaddq_rot270_s16, vhaddq_s16,
vhaddq_n_s16, veorq_s16, vcaddq_rot90_s16, vcaddq_rot270_s16, vbrsrq_n_s16, vbicq_s16, vandq_s16,
vaddvaq_s16, vaddq_n_s16, vabdq_s16, vshlq_n_s16, vrshrq_n_s16, vqshlq_n_s16, vsubq_u32,
vsubq_n_u32, vrmulhq_u32, vrhaddq_u32, vqsubq_u32, vqsubq_n_u32, vqaddq_u32, vqaddq_n_u32,
vorrq_u32, vornq_u32, vmulq_u32, vmulq_n_u32, vmulltq_int_u32, vmullbq_int_u32, vmulhq_u32,
vmladavq_u32, vminvq_u32, vminq_u32, vmaxvq_u32, vmaxq_u32, vhsubq_u32, vhsubq_n_u32, vhaddq_u32,
vhaddq_n_u32, veorq_u32, vcmpneq_n_u32, vcmphiq_u32, vcmphiq_n_u32, vcmpeqq_u32,
vcmpeqq_n_u32, vcmpcsq_u32, vcmpcsq_n_u32, vcaddq_rot90_u32, vcaddq_rot270_u32, vbicq_u32,
vandq_u32, vaddvq_p_u32, vaddvaq_u32, vaddq_n_u32, vabdq_u32, vshlq_r_u32, vrshlq_u32,
vrshlq_n_u32, vqshlq_u32, vqshlq_r_u32, vqrshlq_u32, vqrshlq_n_u32, vminavq_s32, vminaq_s32,
vmaxavq_s32, vmaxaq_s32, vbrsrq_n_u32, vshlq_n_u32, vrshrq_n_u32, vqshlq_n_u32, vcmpneq_n_s32,
vcmpltq_s32, vcmpltq_n_s32, vcmpleq_s32, vcmpleq_n_s32, vcmpgtq_s32, vcmpgtq_n_s32, vcmpgeq_s32,
vcmpgeq_n_s32, vcmpeqq_s32, vcmpeqq_n_s32, vqshluq_n_s32, vaddvq_p_s32, vsubq_s32, vsubq_n_s32,
vshlq_r_s32, vrshlq_s32, vrshlq_n_s32, vrmulhq_s32, vrhaddq_s32, vqsubq_s32, vqsubq_n_s32,
vqshlq_s32, vqshlq_r_s32, vqrshlq_s32, vqrshlq_n_s32, vqrdmulhq_s32, vqrdmulhq_n_s32,
vqdmulhq_s32, vqdmulhq_n_s32, vqaddq_s32, vqaddq_n_s32, vorrq_s32, vornq_s32, vmulq_s32,
vmulq_n_s32, vmulltq_int_s32, vmullbq_int_s32, vmulhq_s32, vmlsdavxq_s32, vmlsdavq_s32,
vmladavxq_s32, vmladavq_s32, vminvq_s32, vminq_s32, vmaxvq_s32, vmaxq_s32, vhsubq_s32,
vhsubq_n_s32, vhcaddq_rot90_s32, vhcaddq_rot270_s32, vhaddq_s32, vhaddq_n_s32, veorq_s32,
vcaddq_rot90_s32, vcaddq_rot270_s32, vbrsrq_n_s32, vbicq_s32, vandq_s32, vaddvaq_s32,
vaddq_n_s32, vabdq_s32, vshlq_n_s32, vrshrq_n_s32, vqshlq_n_s32.

Please refer to the M-profile Vector Extension (MVE) intrinsics [1] for more details.
[1] https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics

In this patch new constraints "Ra" and "Rg" are added. Ra checks that the constant is within the
range of 0 to 7, whereas Rg checks that the constant is one among 1, 2, 4 and 8. New predicates
"mve_imm_7" and "mve_imm_selective_upto_8" are also added, to check the matching constraints Ra
and Rg respectively.

The above intrinsics are defined using the already defined builtin qualifiers BINOP_NONE_NONE_IMM,
BINOP_NONE_NONE_NONE, BINOP_NONE_NONE_UNONE, BINOP_UNONE_NONE_IMM, BINOP_UNONE_NONE_NONE,
BINOP_UNONE_UNONE_IMM, BINOP_UNONE_UNONE_NONE, BINOP_UNONE_UNONE_UNONE. Illustrative usage and
test sketches for these intrinsics are given after the ChangeLog entries below.

2020-03-17  Andre Vieira  <andre.simoesdiasvieira@arm.com>
            Mihail Ionescu  <mihail.ionescu@arm.com>
            Srinath Parvathaneni  <srinath.parvathaneni@arm.com>

* config/arm/arm_mve.h (vsubq_u8): Define macro. (vsubq_n_u8): Likewise. (vrmulhq_u8): Likewise. (vrhaddq_u8): Likewise. (vqsubq_u8): Likewise. (vqsubq_n_u8): Likewise. (vqaddq_u8): Likewise. (vqaddq_n_u8): Likewise. (vorrq_u8): Likewise. (vornq_u8): Likewise. (vmulq_u8): Likewise. (vmulq_n_u8): Likewise. (vmulltq_int_u8): Likewise. (vmullbq_int_u8): Likewise. (vmulhq_u8): Likewise. (vmladavq_u8): Likewise. (vminvq_u8): Likewise. (vminq_u8): Likewise. (vmaxvq_u8): Likewise. (vmaxq_u8): Likewise. (vhsubq_u8): Likewise. (vhsubq_n_u8): Likewise. (vhaddq_u8): Likewise. (vhaddq_n_u8): Likewise. (veorq_u8): Likewise. (vcmpneq_n_u8): Likewise. (vcmphiq_u8): Likewise. (vcmphiq_n_u8): Likewise. (vcmpeqq_u8): Likewise. (vcmpeqq_n_u8): Likewise. (vcmpcsq_u8): Likewise. (vcmpcsq_n_u8): Likewise. (vcaddq_rot90_u8): Likewise. (vcaddq_rot270_u8): Likewise. (vbicq_u8): Likewise. (vandq_u8): Likewise. (vaddvq_p_u8): Likewise. (vaddvaq_u8): Likewise. (vaddq_n_u8): Likewise. (vabdq_u8): Likewise. (vshlq_r_u8): Likewise. (vrshlq_u8): Likewise. (vrshlq_n_u8): Likewise. (vqshlq_u8): Likewise. (vqshlq_r_u8): Likewise. (vqrshlq_u8): Likewise. (vqrshlq_n_u8): Likewise. (vminavq_s8): Likewise. (vminaq_s8): Likewise. (vmaxavq_s8): Likewise. (vmaxaq_s8): Likewise. (vbrsrq_n_u8): Likewise. (vshlq_n_u8): Likewise. (vrshrq_n_u8): Likewise. (vqshlq_n_u8): Likewise. (vcmpneq_n_s8): Likewise. (vcmpltq_s8): Likewise. 
(vcmpltq_n_s8): Likewise. (vcmpleq_s8): Likewise. (vcmpleq_n_s8): Likewise. (vcmpgtq_s8): Likewise. (vcmpgtq_n_s8): Likewise. (vcmpgeq_s8): Likewise. (vcmpgeq_n_s8): Likewise. (vcmpeqq_s8): Likewise. (vcmpeqq_n_s8): Likewise. (vqshluq_n_s8): Likewise. (vaddvq_p_s8): Likewise. (vsubq_s8): Likewise. (vsubq_n_s8): Likewise. (vshlq_r_s8): Likewise. (vrshlq_s8): Likewise. (vrshlq_n_s8): Likewise. (vrmulhq_s8): Likewise. (vrhaddq_s8): Likewise. (vqsubq_s8): Likewise. (vqsubq_n_s8): Likewise. (vqshlq_s8): Likewise. (vqshlq_r_s8): Likewise. (vqrshlq_s8): Likewise. (vqrshlq_n_s8): Likewise. (vqrdmulhq_s8): Likewise. (vqrdmulhq_n_s8): Likewise. (vqdmulhq_s8): Likewise. (vqdmulhq_n_s8): Likewise. (vqaddq_s8): Likewise. (vqaddq_n_s8): Likewise. (vorrq_s8): Likewise. (vornq_s8): Likewise. (vmulq_s8): Likewise. (vmulq_n_s8): Likewise. (vmulltq_int_s8): Likewise. (vmullbq_int_s8): Likewise. (vmulhq_s8): Likewise. (vmlsdavxq_s8): Likewise. (vmlsdavq_s8): Likewise. (vmladavxq_s8): Likewise. (vmladavq_s8): Likewise. (vminvq_s8): Likewise. (vminq_s8): Likewise. (vmaxvq_s8): Likewise. (vmaxq_s8): Likewise. (vhsubq_s8): Likewise. (vhsubq_n_s8): Likewise. (vhcaddq_rot90_s8): Likewise. (vhcaddq_rot270_s8): Likewise. (vhaddq_s8): Likewise. (vhaddq_n_s8): Likewise. (veorq_s8): Likewise. (vcaddq_rot90_s8): Likewise. (vcaddq_rot270_s8): Likewise. (vbrsrq_n_s8): Likewise. (vbicq_s8): Likewise. (vandq_s8): Likewise. (vaddvaq_s8): Likewise. (vaddq_n_s8): Likewise. (vabdq_s8): Likewise. (vshlq_n_s8): Likewise. (vrshrq_n_s8): Likewise. (vqshlq_n_s8): Likewise. (vsubq_u16): Likewise. (vsubq_n_u16): Likewise. (vrmulhq_u16): Likewise. (vrhaddq_u16): Likewise. (vqsubq_u16): Likewise. (vqsubq_n_u16): Likewise. (vqaddq_u16): Likewise. (vqaddq_n_u16): Likewise. (vorrq_u16): Likewise. (vornq_u16): Likewise. (vmulq_u16): Likewise. (vmulq_n_u16): Likewise. (vmulltq_int_u16): Likewise. (vmullbq_int_u16): Likewise. (vmulhq_u16): Likewise. (vmladavq_u16): Likewise. (vminvq_u16): Likewise. (vminq_u16): Likewise. (vmaxvq_u16): Likewise. (vmaxq_u16): Likewise. (vhsubq_u16): Likewise. (vhsubq_n_u16): Likewise. (vhaddq_u16): Likewise. (vhaddq_n_u16): Likewise. (veorq_u16): Likewise. (vcmpneq_n_u16): Likewise. (vcmphiq_u16): Likewise. (vcmphiq_n_u16): Likewise. (vcmpeqq_u16): Likewise. (vcmpeqq_n_u16): Likewise. (vcmpcsq_u16): Likewise. (vcmpcsq_n_u16): Likewise. (vcaddq_rot90_u16): Likewise. (vcaddq_rot270_u16): Likewise. (vbicq_u16): Likewise. (vandq_u16): Likewise. (vaddvq_p_u16): Likewise. (vaddvaq_u16): Likewise. (vaddq_n_u16): Likewise. (vabdq_u16): Likewise. (vshlq_r_u16): Likewise. (vrshlq_u16): Likewise. (vrshlq_n_u16): Likewise. (vqshlq_u16): Likewise. (vqshlq_r_u16): Likewise. (vqrshlq_u16): Likewise. (vqrshlq_n_u16): Likewise. (vminavq_s16): Likewise. (vminaq_s16): Likewise. (vmaxavq_s16): Likewise. (vmaxaq_s16): Likewise. (vbrsrq_n_u16): Likewise. (vshlq_n_u16): Likewise. (vrshrq_n_u16): Likewise. (vqshlq_n_u16): Likewise. (vcmpneq_n_s16): Likewise. (vcmpltq_s16): Likewise. (vcmpltq_n_s16): Likewise. (vcmpleq_s16): Likewise. (vcmpleq_n_s16): Likewise. (vcmpgtq_s16): Likewise. (vcmpgtq_n_s16): Likewise. (vcmpgeq_s16): Likewise. (vcmpgeq_n_s16): Likewise. (vcmpeqq_s16): Likewise. (vcmpeqq_n_s16): Likewise. (vqshluq_n_s16): Likewise. (vaddvq_p_s16): Likewise. (vsubq_s16): Likewise. (vsubq_n_s16): Likewise. (vshlq_r_s16): Likewise. (vrshlq_s16): Likewise. (vrshlq_n_s16): Likewise. (vrmulhq_s16): Likewise. (vrhaddq_s16): Likewise. (vqsubq_s16): Likewise. (vqsubq_n_s16): Likewise. (vqshlq_s16): Likewise. (vqshlq_r_s16): Likewise. 
(vqrshlq_s16): Likewise. (vqrshlq_n_s16): Likewise. (vqrdmulhq_s16): Likewise. (vqrdmulhq_n_s16): Likewise. (vqdmulhq_s16): Likewise. (vqdmulhq_n_s16): Likewise. (vqaddq_s16): Likewise. (vqaddq_n_s16): Likewise. (vorrq_s16): Likewise. (vornq_s16): Likewise. (vmulq_s16): Likewise. (vmulq_n_s16): Likewise. (vmulltq_int_s16): Likewise. (vmullbq_int_s16): Likewise. (vmulhq_s16): Likewise. (vmlsdavxq_s16): Likewise. (vmlsdavq_s16): Likewise. (vmladavxq_s16): Likewise. (vmladavq_s16): Likewise. (vminvq_s16): Likewise. (vminq_s16): Likewise. (vmaxvq_s16): Likewise. (vmaxq_s16): Likewise. (vhsubq_s16): Likewise. (vhsubq_n_s16): Likewise. (vhcaddq_rot90_s16): Likewise. (vhcaddq_rot270_s16): Likewise. (vhaddq_s16): Likewise. (vhaddq_n_s16): Likewise. (veorq_s16): Likewise. (vcaddq_rot90_s16): Likewise. (vcaddq_rot270_s16): Likewise. (vbrsrq_n_s16): Likewise. (vbicq_s16): Likewise. (vandq_s16): Likewise. (vaddvaq_s16): Likewise. (vaddq_n_s16): Likewise. (vabdq_s16): Likewise. (vshlq_n_s16): Likewise. (vrshrq_n_s16): Likewise. (vqshlq_n_s16): Likewise. (vsubq_u32): Likewise. (vsubq_n_u32): Likewise. (vrmulhq_u32): Likewise. (vrhaddq_u32): Likewise. (vqsubq_u32): Likewise. (vqsubq_n_u32): Likewise. (vqaddq_u32): Likewise. (vqaddq_n_u32): Likewise. (vorrq_u32): Likewise. (vornq_u32): Likewise. (vmulq_u32): Likewise. (vmulq_n_u32): Likewise. (vmulltq_int_u32): Likewise. (vmullbq_int_u32): Likewise. (vmulhq_u32): Likewise. (vmladavq_u32): Likewise. (vminvq_u32): Likewise. (vminq_u32): Likewise. (vmaxvq_u32): Likewise. (vmaxq_u32): Likewise. (vhsubq_u32): Likewise. (vhsubq_n_u32): Likewise. (vhaddq_u32): Likewise. (vhaddq_n_u32): Likewise. (veorq_u32): Likewise. (vcmpneq_n_u32): Likewise. (vcmphiq_u32): Likewise. (vcmphiq_n_u32): Likewise. (vcmpeqq_u32): Likewise. (vcmpeqq_n_u32): Likewise. (vcmpcsq_u32): Likewise. (vcmpcsq_n_u32): Likewise. (vcaddq_rot90_u32): Likewise. (vcaddq_rot270_u32): Likewise. (vbicq_u32): Likewise. (vandq_u32): Likewise. (vaddvq_p_u32): Likewise. (vaddvaq_u32): Likewise. (vaddq_n_u32): Likewise. (vabdq_u32): Likewise. (vshlq_r_u32): Likewise. (vrshlq_u32): Likewise. (vrshlq_n_u32): Likewise. (vqshlq_u32): Likewise. (vqshlq_r_u32): Likewise. (vqrshlq_u32): Likewise. (vqrshlq_n_u32): Likewise. (vminavq_s32): Likewise. (vminaq_s32): Likewise. (vmaxavq_s32): Likewise. (vmaxaq_s32): Likewise. (vbrsrq_n_u32): Likewise. (vshlq_n_u32): Likewise. (vrshrq_n_u32): Likewise. (vqshlq_n_u32): Likewise. (vcmpneq_n_s32): Likewise. (vcmpltq_s32): Likewise. (vcmpltq_n_s32): Likewise. (vcmpleq_s32): Likewise. (vcmpleq_n_s32): Likewise. (vcmpgtq_s32): Likewise. (vcmpgtq_n_s32): Likewise. (vcmpgeq_s32): Likewise. (vcmpgeq_n_s32): Likewise. (vcmpeqq_s32): Likewise. (vcmpeqq_n_s32): Likewise. (vqshluq_n_s32): Likewise. (vaddvq_p_s32): Likewise. (vsubq_s32): Likewise. (vsubq_n_s32): Likewise. (vshlq_r_s32): Likewise. (vrshlq_s32): Likewise. (vrshlq_n_s32): Likewise. (vrmulhq_s32): Likewise. (vrhaddq_s32): Likewise. (vqsubq_s32): Likewise. (vqsubq_n_s32): Likewise. (vqshlq_s32): Likewise. (vqshlq_r_s32): Likewise. (vqrshlq_s32): Likewise. (vqrshlq_n_s32): Likewise. (vqrdmulhq_s32): Likewise. (vqrdmulhq_n_s32): Likewise. (vqdmulhq_s32): Likewise. (vqdmulhq_n_s32): Likewise. (vqaddq_s32): Likewise. (vqaddq_n_s32): Likewise. (vorrq_s32): Likewise. (vornq_s32): Likewise. (vmulq_s32): Likewise. (vmulq_n_s32): Likewise. (vmulltq_int_s32): Likewise. (vmullbq_int_s32): Likewise. (vmulhq_s32): Likewise. (vmlsdavxq_s32): Likewise. (vmlsdavq_s32): Likewise. (vmladavxq_s32): Likewise. (vmladavq_s32): Likewise. 
(vminvq_s32): Likewise. (vminq_s32): Likewise. (vmaxvq_s32): Likewise. (vmaxq_s32): Likewise. (vhsubq_s32): Likewise. (vhsubq_n_s32): Likewise. (vhcaddq_rot90_s32): Likewise. (vhcaddq_rot270_s32): Likewise. (vhaddq_s32): Likewise. (vhaddq_n_s32): Likewise. (veorq_s32): Likewise. (vcaddq_rot90_s32): Likewise. (vcaddq_rot270_s32): Likewise. (vbrsrq_n_s32): Likewise. (vbicq_s32): Likewise. (vandq_s32): Likewise. (vaddvaq_s32): Likewise. (vaddq_n_s32): Likewise. (vabdq_s32): Likewise. (vshlq_n_s32): Likewise. (vrshrq_n_s32): Likewise. (vqshlq_n_s32): Likewise. (__arm_vsubq_u8): Define intrinsic. (__arm_vsubq_n_u8): Likewise. (__arm_vrmulhq_u8): Likewise. (__arm_vrhaddq_u8): Likewise. (__arm_vqsubq_u8): Likewise. (__arm_vqsubq_n_u8): Likewise. (__arm_vqaddq_u8): Likewise. (__arm_vqaddq_n_u8): Likewise. (__arm_vorrq_u8): Likewise. (__arm_vornq_u8): Likewise. (__arm_vmulq_u8): Likewise. (__arm_vmulq_n_u8): Likewise. (__arm_vmulltq_int_u8): Likewise. (__arm_vmullbq_int_u8): Likewise. (__arm_vmulhq_u8): Likewise. (__arm_vmladavq_u8): Likewise. (__arm_vminvq_u8): Likewise. (__arm_vminq_u8): Likewise. (__arm_vmaxvq_u8): Likewise. (__arm_vmaxq_u8): Likewise. (__arm_vhsubq_u8): Likewise. (__arm_vhsubq_n_u8): Likewise. (__arm_vhaddq_u8): Likewise. (__arm_vhaddq_n_u8): Likewise. (__arm_veorq_u8): Likewise. (__arm_vcmpneq_n_u8): Likewise. (__arm_vcmphiq_u8): Likewise. (__arm_vcmphiq_n_u8): Likewise. (__arm_vcmpeqq_u8): Likewise. (__arm_vcmpeqq_n_u8): Likewise. (__arm_vcmpcsq_u8): Likewise. (__arm_vcmpcsq_n_u8): Likewise. (__arm_vcaddq_rot90_u8): Likewise. (__arm_vcaddq_rot270_u8): Likewise. (__arm_vbicq_u8): Likewise. (__arm_vandq_u8): Likewise. (__arm_vaddvq_p_u8): Likewise. (__arm_vaddvaq_u8): Likewise. (__arm_vaddq_n_u8): Likewise. (__arm_vabdq_u8): Likewise. (__arm_vshlq_r_u8): Likewise. (__arm_vrshlq_u8): Likewise. (__arm_vrshlq_n_u8): Likewise. (__arm_vqshlq_u8): Likewise. (__arm_vqshlq_r_u8): Likewise. (__arm_vqrshlq_u8): Likewise. (__arm_vqrshlq_n_u8): Likewise. (__arm_vminavq_s8): Likewise. (__arm_vminaq_s8): Likewise. (__arm_vmaxavq_s8): Likewise. (__arm_vmaxaq_s8): Likewise. (__arm_vbrsrq_n_u8): Likewise. (__arm_vshlq_n_u8): Likewise. (__arm_vrshrq_n_u8): Likewise. (__arm_vqshlq_n_u8): Likewise. (__arm_vcmpneq_n_s8): Likewise. (__arm_vcmpltq_s8): Likewise. (__arm_vcmpltq_n_s8): Likewise. (__arm_vcmpleq_s8): Likewise. (__arm_vcmpleq_n_s8): Likewise. (__arm_vcmpgtq_s8): Likewise. (__arm_vcmpgtq_n_s8): Likewise. (__arm_vcmpgeq_s8): Likewise. (__arm_vcmpgeq_n_s8): Likewise. (__arm_vcmpeqq_s8): Likewise. (__arm_vcmpeqq_n_s8): Likewise. (__arm_vqshluq_n_s8): Likewise. (__arm_vaddvq_p_s8): Likewise. (__arm_vsubq_s8): Likewise. (__arm_vsubq_n_s8): Likewise. (__arm_vshlq_r_s8): Likewise. (__arm_vrshlq_s8): Likewise. (__arm_vrshlq_n_s8): Likewise. (__arm_vrmulhq_s8): Likewise. (__arm_vrhaddq_s8): Likewise. (__arm_vqsubq_s8): Likewise. (__arm_vqsubq_n_s8): Likewise. (__arm_vqshlq_s8): Likewise. (__arm_vqshlq_r_s8): Likewise. (__arm_vqrshlq_s8): Likewise. (__arm_vqrshlq_n_s8): Likewise. (__arm_vqrdmulhq_s8): Likewise. (__arm_vqrdmulhq_n_s8): Likewise. (__arm_vqdmulhq_s8): Likewise. (__arm_vqdmulhq_n_s8): Likewise. (__arm_vqaddq_s8): Likewise. (__arm_vqaddq_n_s8): Likewise. (__arm_vorrq_s8): Likewise. (__arm_vornq_s8): Likewise. (__arm_vmulq_s8): Likewise. (__arm_vmulq_n_s8): Likewise. (__arm_vmulltq_int_s8): Likewise. (__arm_vmullbq_int_s8): Likewise. (__arm_vmulhq_s8): Likewise. (__arm_vmlsdavxq_s8): Likewise. (__arm_vmlsdavq_s8): Likewise. (__arm_vmladavxq_s8): Likewise. (__arm_vmladavq_s8): Likewise. 
(__arm_vminvq_s8): Likewise. (__arm_vminq_s8): Likewise. (__arm_vmaxvq_s8): Likewise. (__arm_vmaxq_s8): Likewise. (__arm_vhsubq_s8): Likewise. (__arm_vhsubq_n_s8): Likewise. (__arm_vhcaddq_rot90_s8): Likewise. (__arm_vhcaddq_rot270_s8): Likewise. (__arm_vhaddq_s8): Likewise. (__arm_vhaddq_n_s8): Likewise. (__arm_veorq_s8): Likewise. (__arm_vcaddq_rot90_s8): Likewise. (__arm_vcaddq_rot270_s8): Likewise. (__arm_vbrsrq_n_s8): Likewise. (__arm_vbicq_s8): Likewise. (__arm_vandq_s8): Likewise. (__arm_vaddvaq_s8): Likewise. (__arm_vaddq_n_s8): Likewise. (__arm_vabdq_s8): Likewise. (__arm_vshlq_n_s8): Likewise. (__arm_vrshrq_n_s8): Likewise. (__arm_vqshlq_n_s8): Likewise. (__arm_vsubq_u16): Likewise. (__arm_vsubq_n_u16): Likewise. (__arm_vrmulhq_u16): Likewise. (__arm_vrhaddq_u16): Likewise. (__arm_vqsubq_u16): Likewise. (__arm_vqsubq_n_u16): Likewise. (__arm_vqaddq_u16): Likewise. (__arm_vqaddq_n_u16): Likewise. (__arm_vorrq_u16): Likewise. (__arm_vornq_u16): Likewise. (__arm_vmulq_u16): Likewise. (__arm_vmulq_n_u16): Likewise. (__arm_vmulltq_int_u16): Likewise. (__arm_vmullbq_int_u16): Likewise. (__arm_vmulhq_u16): Likewise. (__arm_vmladavq_u16): Likewise. (__arm_vminvq_u16): Likewise. (__arm_vminq_u16): Likewise. (__arm_vmaxvq_u16): Likewise. (__arm_vmaxq_u16): Likewise. (__arm_vhsubq_u16): Likewise. (__arm_vhsubq_n_u16): Likewise. (__arm_vhaddq_u16): Likewise. (__arm_vhaddq_n_u16): Likewise. (__arm_veorq_u16): Likewise. (__arm_vcmpneq_n_u16): Likewise. (__arm_vcmphiq_u16): Likewise. (__arm_vcmphiq_n_u16): Likewise. (__arm_vcmpeqq_u16): Likewise. (__arm_vcmpeqq_n_u16): Likewise. (__arm_vcmpcsq_u16): Likewise. (__arm_vcmpcsq_n_u16): Likewise. (__arm_vcaddq_rot90_u16): Likewise. (__arm_vcaddq_rot270_u16): Likewise. (__arm_vbicq_u16): Likewise. (__arm_vandq_u16): Likewise. (__arm_vaddvq_p_u16): Likewise. (__arm_vaddvaq_u16): Likewise. (__arm_vaddq_n_u16): Likewise. (__arm_vabdq_u16): Likewise. (__arm_vshlq_r_u16): Likewise. (__arm_vrshlq_u16): Likewise. (__arm_vrshlq_n_u16): Likewise. (__arm_vqshlq_u16): Likewise. (__arm_vqshlq_r_u16): Likewise. (__arm_vqrshlq_u16): Likewise. (__arm_vqrshlq_n_u16): Likewise. (__arm_vminavq_s16): Likewise. (__arm_vminaq_s16): Likewise. (__arm_vmaxavq_s16): Likewise. (__arm_vmaxaq_s16): Likewise. (__arm_vbrsrq_n_u16): Likewise. (__arm_vshlq_n_u16): Likewise. (__arm_vrshrq_n_u16): Likewise. (__arm_vqshlq_n_u16): Likewise. (__arm_vcmpneq_n_s16): Likewise. (__arm_vcmpltq_s16): Likewise. (__arm_vcmpltq_n_s16): Likewise. (__arm_vcmpleq_s16): Likewise. (__arm_vcmpleq_n_s16): Likewise. (__arm_vcmpgtq_s16): Likewise. (__arm_vcmpgtq_n_s16): Likewise. (__arm_vcmpgeq_s16): Likewise. (__arm_vcmpgeq_n_s16): Likewise. (__arm_vcmpeqq_s16): Likewise. (__arm_vcmpeqq_n_s16): Likewise. (__arm_vqshluq_n_s16): Likewise. (__arm_vaddvq_p_s16): Likewise. (__arm_vsubq_s16): Likewise. (__arm_vsubq_n_s16): Likewise. (__arm_vshlq_r_s16): Likewise. (__arm_vrshlq_s16): Likewise. (__arm_vrshlq_n_s16): Likewise. (__arm_vrmulhq_s16): Likewise. (__arm_vrhaddq_s16): Likewise. (__arm_vqsubq_s16): Likewise. (__arm_vqsubq_n_s16): Likewise. (__arm_vqshlq_s16): Likewise. (__arm_vqshlq_r_s16): Likewise. (__arm_vqrshlq_s16): Likewise. (__arm_vqrshlq_n_s16): Likewise. (__arm_vqrdmulhq_s16): Likewise. (__arm_vqrdmulhq_n_s16): Likewise. (__arm_vqdmulhq_s16): Likewise. (__arm_vqdmulhq_n_s16): Likewise. (__arm_vqaddq_s16): Likewise. (__arm_vqaddq_n_s16): Likewise. (__arm_vorrq_s16): Likewise. (__arm_vornq_s16): Likewise. (__arm_vmulq_s16): Likewise. (__arm_vmulq_n_s16): Likewise. 
(__arm_vmulltq_int_s16): Likewise. (__arm_vmullbq_int_s16): Likewise. (__arm_vmulhq_s16): Likewise. (__arm_vmlsdavxq_s16): Likewise. (__arm_vmlsdavq_s16): Likewise. (__arm_vmladavxq_s16): Likewise. (__arm_vmladavq_s16): Likewise. (__arm_vminvq_s16): Likewise. (__arm_vminq_s16): Likewise. (__arm_vmaxvq_s16): Likewise. (__arm_vmaxq_s16): Likewise. (__arm_vhsubq_s16): Likewise. (__arm_vhsubq_n_s16): Likewise. (__arm_vhcaddq_rot90_s16): Likewise. (__arm_vhcaddq_rot270_s16): Likewise. (__arm_vhaddq_s16): Likewise. (__arm_vhaddq_n_s16): Likewise. (__arm_veorq_s16): Likewise. (__arm_vcaddq_rot90_s16): Likewise. (__arm_vcaddq_rot270_s16): Likewise. (__arm_vbrsrq_n_s16): Likewise. (__arm_vbicq_s16): Likewise. (__arm_vandq_s16): Likewise. (__arm_vaddvaq_s16): Likewise. (__arm_vaddq_n_s16): Likewise. (__arm_vabdq_s16): Likewise. (__arm_vshlq_n_s16): Likewise. (__arm_vrshrq_n_s16): Likewise. (__arm_vqshlq_n_s16): Likewise. (__arm_vsubq_u32): Likewise. (__arm_vsubq_n_u32): Likewise. (__arm_vrmulhq_u32): Likewise. (__arm_vrhaddq_u32): Likewise. (__arm_vqsubq_u32): Likewise. (__arm_vqsubq_n_u32): Likewise. (__arm_vqaddq_u32): Likewise. (__arm_vqaddq_n_u32): Likewise. (__arm_vorrq_u32): Likewise. (__arm_vornq_u32): Likewise. (__arm_vmulq_u32): Likewise. (__arm_vmulq_n_u32): Likewise. (__arm_vmulltq_int_u32): Likewise. (__arm_vmullbq_int_u32): Likewise. (__arm_vmulhq_u32): Likewise. (__arm_vmladavq_u32): Likewise. (__arm_vminvq_u32): Likewise. (__arm_vminq_u32): Likewise. (__arm_vmaxvq_u32): Likewise. (__arm_vmaxq_u32): Likewise. (__arm_vhsubq_u32): Likewise. (__arm_vhsubq_n_u32): Likewise. (__arm_vhaddq_u32): Likewise. (__arm_vhaddq_n_u32): Likewise. (__arm_veorq_u32): Likewise. (__arm_vcmpneq_n_u32): Likewise. (__arm_vcmphiq_u32): Likewise. (__arm_vcmphiq_n_u32): Likewise. (__arm_vcmpeqq_u32): Likewise. (__arm_vcmpeqq_n_u32): Likewise. (__arm_vcmpcsq_u32): Likewise. (__arm_vcmpcsq_n_u32): Likewise. (__arm_vcaddq_rot90_u32): Likewise. (__arm_vcaddq_rot270_u32): Likewise. (__arm_vbicq_u32): Likewise. (__arm_vandq_u32): Likewise. (__arm_vaddvq_p_u32): Likewise. (__arm_vaddvaq_u32): Likewise. (__arm_vaddq_n_u32): Likewise. (__arm_vabdq_u32): Likewise. (__arm_vshlq_r_u32): Likewise. (__arm_vrshlq_u32): Likewise. (__arm_vrshlq_n_u32): Likewise. (__arm_vqshlq_u32): Likewise. (__arm_vqshlq_r_u32): Likewise. (__arm_vqrshlq_u32): Likewise. (__arm_vqrshlq_n_u32): Likewise. (__arm_vminavq_s32): Likewise. (__arm_vminaq_s32): Likewise. (__arm_vmaxavq_s32): Likewise. (__arm_vmaxaq_s32): Likewise. (__arm_vbrsrq_n_u32): Likewise. (__arm_vshlq_n_u32): Likewise. (__arm_vrshrq_n_u32): Likewise. (__arm_vqshlq_n_u32): Likewise. (__arm_vcmpneq_n_s32): Likewise. (__arm_vcmpltq_s32): Likewise. (__arm_vcmpltq_n_s32): Likewise. (__arm_vcmpleq_s32): Likewise. (__arm_vcmpleq_n_s32): Likewise. (__arm_vcmpgtq_s32): Likewise. (__arm_vcmpgtq_n_s32): Likewise. (__arm_vcmpgeq_s32): Likewise. (__arm_vcmpgeq_n_s32): Likewise. (__arm_vcmpeqq_s32): Likewise. (__arm_vcmpeqq_n_s32): Likewise. (__arm_vqshluq_n_s32): Likewise. (__arm_vaddvq_p_s32): Likewise. (__arm_vsubq_s32): Likewise. (__arm_vsubq_n_s32): Likewise. (__arm_vshlq_r_s32): Likewise. (__arm_vrshlq_s32): Likewise. (__arm_vrshlq_n_s32): Likewise. (__arm_vrmulhq_s32): Likewise. (__arm_vrhaddq_s32): Likewise. (__arm_vqsubq_s32): Likewise. (__arm_vqsubq_n_s32): Likewise. (__arm_vqshlq_s32): Likewise. (__arm_vqshlq_r_s32): Likewise. (__arm_vqrshlq_s32): Likewise. (__arm_vqrshlq_n_s32): Likewise. (__arm_vqrdmulhq_s32): Likewise. (__arm_vqrdmulhq_n_s32): Likewise. 
(__arm_vqdmulhq_s32): Likewise. (__arm_vqdmulhq_n_s32): Likewise. (__arm_vqaddq_s32): Likewise. (__arm_vqaddq_n_s32): Likewise. (__arm_vorrq_s32): Likewise. (__arm_vornq_s32): Likewise. (__arm_vmulq_s32): Likewise. (__arm_vmulq_n_s32): Likewise. (__arm_vmulltq_int_s32): Likewise. (__arm_vmullbq_int_s32): Likewise. (__arm_vmulhq_s32): Likewise. (__arm_vmlsdavxq_s32): Likewise. (__arm_vmlsdavq_s32): Likewise. (__arm_vmladavxq_s32): Likewise. (__arm_vmladavq_s32): Likewise. (__arm_vminvq_s32): Likewise. (__arm_vminq_s32): Likewise. (__arm_vmaxvq_s32): Likewise. (__arm_vmaxq_s32): Likewise. (__arm_vhsubq_s32): Likewise. (__arm_vhsubq_n_s32): Likewise. (__arm_vhcaddq_rot90_s32): Likewise. (__arm_vhcaddq_rot270_s32): Likewise. (__arm_vhaddq_s32): Likewise. (__arm_vhaddq_n_s32): Likewise. (__arm_veorq_s32): Likewise. (__arm_vcaddq_rot90_s32): Likewise. (__arm_vcaddq_rot270_s32): Likewise. (__arm_vbrsrq_n_s32): Likewise. (__arm_vbicq_s32): Likewise. (__arm_vandq_s32): Likewise. (__arm_vaddvaq_s32): Likewise. (__arm_vaddq_n_s32): Likewise. (__arm_vabdq_s32): Likewise. (__arm_vshlq_n_s32): Likewise. (__arm_vrshrq_n_s32): Likewise. (__arm_vqshlq_n_s32): Likewise. (vsubq): Define polymorphic variant. (vsubq_n): Likewise. (vshlq_r): Likewise. (vrshlq_n): Likewise. (vrshlq): Likewise. (vrmulhq): Likewise. (vrhaddq): Likewise. (vqsubq_n): Likewise. (vqsubq): Likewise. (vqshlq): Likewise. (vqshlq_r): Likewise. (vqshluq): Likewise. (vrshrq_n): Likewise. (vshlq_n): Likewise. (vqshluq_n): Likewise. (vqshlq_n): Likewise. (vqrshlq_n): Likewise. (vqrshlq): Likewise. (vqrdmulhq_n): Likewise. (vqrdmulhq): Likewise. (vqdmulhq_n): Likewise. (vqdmulhq): Likewise. (vqaddq_n): Likewise. (vqaddq): Likewise. (vorrq_n): Likewise. (vorrq): Likewise. (vornq): Likewise. (vmulq_n): Likewise. (vmulq): Likewise. (vmulltq_int): Likewise. (vmullbq_int): Likewise. (vmulhq): Likewise. (vminq): Likewise. (vminaq): Likewise. (vmaxq): Likewise. (vmaxaq): Likewise. (vhsubq_n): Likewise. (vhsubq): Likewise. (vhcaddq_rot90): Likewise. (vhcaddq_rot270): Likewise. (vhaddq_n): Likewise. (vhaddq): Likewise. (veorq): Likewise. (vcaddq_rot90): Likewise. (vcaddq_rot270): Likewise. (vbrsrq_n): Likewise. (vbicq_n): Likewise. (vbicq): Likewise. (vaddq): Likewise. (vaddq_n): Likewise. (vandq): Likewise. (vabdq): Likewise. * config/arm/arm_mve_builtins.def (BINOP_NONE_NONE_IMM): Use it. (BINOP_NONE_NONE_NONE): Likewise. (BINOP_NONE_NONE_UNONE): Likewise. (BINOP_UNONE_NONE_IMM): Likewise. (BINOP_UNONE_NONE_NONE): Likewise. (BINOP_UNONE_UNONE_IMM): Likewise. (BINOP_UNONE_UNONE_NONE): Likewise. (BINOP_UNONE_UNONE_UNONE): Likewise. * config/arm/constraints.md (Ra): Define constraint to check the constant is in the range of 0 to 7. (Rg): Define constraint to check the constant is one among 1, 2, 4 and 8. * config/arm/mve.md (mve_vabdq_<supf>): Define RTL pattern. (mve_vaddq_n_<supf>): Likewise. (mve_vaddvaq_<supf>): Likewise. (mve_vaddvq_p_<supf>): Likewise. (mve_vandq_<supf>): Likewise. (mve_vbicq_<supf>): Likewise. (mve_vbrsrq_n_<supf>): Likewise. (mve_vcaddq_rot270_<supf>): Likewise. (mve_vcaddq_rot90_<supf>): Likewise. (mve_vcmpcsq_n_u): Likewise. (mve_vcmpcsq_u): Likewise. (mve_vcmpeqq_n_<supf>): Likewise. (mve_vcmpeqq_<supf>): Likewise. (mve_vcmpgeq_n_s): Likewise. (mve_vcmpgeq_s): Likewise. (mve_vcmpgtq_n_s): Likewise. (mve_vcmpgtq_s): Likewise. (mve_vcmphiq_n_u): Likewise. (mve_vcmphiq_u): Likewise. (mve_vcmpleq_n_s): Likewise. (mve_vcmpleq_s): Likewise. (mve_vcmpltq_n_s): Likewise. (mve_vcmpltq_s): Likewise. (mve_vcmpneq_n_<supf>): Likewise. 
(mve_vddupq_n_u): Likewise. (mve_veorq_<supf>): Likewise. (mve_vhaddq_n_<supf>): Likewise. (mve_vhaddq_<supf>): Likewise. (mve_vhcaddq_rot270_s): Likewise. (mve_vhcaddq_rot90_s): Likewise. (mve_vhsubq_n_<supf>): Likewise. (mve_vhsubq_<supf>): Likewise. (mve_vidupq_n_u): Likewise. (mve_vmaxaq_s): Likewise. (mve_vmaxavq_s): Likewise. (mve_vmaxq_<supf>): Likewise. (mve_vmaxvq_<supf>): Likewise. (mve_vminaq_s): Likewise. (mve_vminavq_s): Likewise. (mve_vminq_<supf>): Likewise. (mve_vminvq_<supf>): Likewise. (mve_vmladavq_<supf>): Likewise. (mve_vmladavxq_s): Likewise. (mve_vmlsdavq_s): Likewise. (mve_vmlsdavxq_s): Likewise. (mve_vmulhq_<supf>): Likewise. (mve_vmullbq_int_<supf>): Likewise. (mve_vmulltq_int_<supf>): Likewise. (mve_vmulq_n_<supf>): Likewise. (mve_vmulq_<supf>): Likewise. (mve_vornq_<supf>): Likewise. (mve_vorrq_<supf>): Likewise. (mve_vqaddq_n_<supf>): Likewise. (mve_vqaddq_<supf>): Likewise. (mve_vqdmulhq_n_s): Likewise. (mve_vqdmulhq_s): Likewise. (mve_vqrdmulhq_n_s): Likewise. (mve_vqrdmulhq_s): Likewise. (mve_vqrshlq_n_<supf>): Likewise. (mve_vqrshlq_<supf>): Likewise. (mve_vqshlq_n_<supf>): Likewise. (mve_vqshlq_r_<supf>): Likewise. (mve_vqshlq_<supf>): Likewise. (mve_vqshluq_n_s): Likewise. (mve_vqsubq_n_<supf>): Likewise. (mve_vqsubq_<supf>): Likewise. (mve_vrhaddq_<supf>): Likewise. (mve_vrmulhq_<supf>): Likewise. (mve_vrshlq_n_<supf>): Likewise. (mve_vrshlq_<supf>): Likewise. (mve_vrshrq_n_<supf>): Likewise. (mve_vshlq_n_<supf>): Likewise. (mve_vshlq_r_<supf>): Likewise. (mve_vsubq_n_<supf>): Likewise. (mve_vsubq_<supf>): Likewise. * config/arm/predicates.md (mve_imm_7): Define predicate to check the matching constraint Ra. (mve_imm_selective_upto_8): Define predicate to check the matching constraint Rg. gcc/testsuite/ChangeLog: 2020-03-17 Andre Vieira <andre.simoesdiasvieira@arm.com> Mihail Ionescu <mihail.ionescu@arm.com> Srinath Parvathaneni <srinath.parvathaneni@arm.com> * gcc.target/arm/mve/intrinsics/vabdq_s16.c: New test. * gcc.target/arm/mve/intrinsics/vabdq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vabdq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vabdq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vabdq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vabdq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvaq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvaq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvaq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvaq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvaq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvaq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvq_p_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvq_p_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvq_p_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvq_p_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvq_p_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddvq_p_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vandq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vandq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vandq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vandq_u16.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vandq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vandq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vbicq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vbicq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vbicq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vbicq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vbicq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vbicq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vbrsrq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vbrsrq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vbrsrq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vbrsrq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vbrsrq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vbrsrq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot270_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot270_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot270_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot270_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot270_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot270_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot90_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot90_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot90_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot90_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot90_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot90_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpcsq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpcsq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpcsq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpcsq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpcsq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpcsq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpeqq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgeq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgeq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgeq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgeq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgeq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgeq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgtq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgtq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgtq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgtq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgtq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpgtq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmphiq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmphiq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmphiq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmphiq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmphiq_u32.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vcmphiq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpleq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpleq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpleq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpleq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpleq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpleq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpltq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpltq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpltq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpltq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpltq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpltq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmpneq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/veorq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/veorq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/veorq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/veorq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/veorq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/veorq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxaq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxaq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxaq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxavq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxavq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxavq_s8.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vmaxq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxvq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxvq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxvq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxvq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxvq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxvq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vminaq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vminaq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vminaq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vminavq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vminavq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vminavq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vminq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vminq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vminq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vminq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vminq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vminq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vminvq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vminvq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vminvq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vminvq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vminvq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vminvq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavxq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavxq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmladavxq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavxq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavxq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmlsdavxq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulhq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulhq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulhq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulhq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulhq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulhq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmullbq_int_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmullbq_int_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmullbq_int_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmullbq_int_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmullbq_int_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmullbq_int_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulltq_int_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulltq_int_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulltq_int_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulltq_int_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulltq_int_u32.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vmulltq_int_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vornq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vornq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vornq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vornq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vornq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vornq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vorrq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vorrq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vorrq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vorrq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vorrq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vorrq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqaddq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqaddq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqaddq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqaddq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqaddq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqaddq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqaddq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqaddq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqaddq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqaddq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqaddq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqaddq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmulhq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmulhq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmulhq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmulhq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmulhq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqdmulhq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmulhq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmulhq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrdmulhq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqrshlq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_n_s16.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vqshlq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_r_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_r_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_r_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_r_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_r_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_r_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshlq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshluq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshluq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqshluq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqsubq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqsubq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqsubq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqsubq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqsubq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqsubq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqsubq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqsubq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqsubq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vqsubq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vqsubq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vqsubq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrhaddq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrhaddq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrhaddq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrhaddq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrhaddq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrhaddq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmulhq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmulhq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmulhq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmulhq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmulhq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmulhq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrq_n_u8.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vshlq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_r_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_r_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_r_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_r_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_r_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_r_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_u8.c: Likewise.
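
As context for the intrinsic lists above, the sketch below shows the kind of user-level code these
additions enable. It is illustrative only (not taken from the patch or its testsuite) and assumes a
compiler invoked with MVE enabled, e.g. -march=armv8.1-m.main+mve -mfloat-abi=hard.

#include <arm_mve.h>

/* vqaddq_n_u8 (vector plus scalar, saturating) and vrhaddq_u8 (rounding
   halving add) are among the intrinsics added by this patch.  */
uint8x16_t
blend (uint8x16_t a, uint8x16_t b, uint8_t bias)
{
  uint8x16_t t = vqaddq_n_u8 (a, bias);
  return vrhaddq_u8 (t, b);
}

/* The shift-by-immediate forms require a compile-time constant; for 8-bit
   elements the valid shift counts are 0..7, the range the new "Ra"
   constraint and "mve_imm_7" predicate are described as checking.  */
uint8x16_t
scale (uint8x16_t a)
{
  return vshlq_n_u8 (a, 3);
}

With the new constraint and predicate in place, an out-of-range or non-constant shift count should
be rejected at compile time.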
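
The polymorphic variants listed in the arm_mve.h ChangeLog entries (vsubq, vaddq_n, vabdq, ...)
let one generic spelling resolve to the matching type-suffixed intrinsic from its operand types.
A small usage sketch, again illustrative rather than taken from the testsuite:

#include <arm_mve.h>

/* The generic name resolves from the operand types, so the same spelling
   works for any supported element type.  */
uint8x16_t
diff_u8 (uint8x16_t a, uint8x16_t b)
{
  return vabdq (a, b);        /* resolves to vabdq_u8  */
}

int32x4_t
diff_s32 (int32x4_t a, int32x4_t b)
{
  return vabdq (a, b);        /* resolves to vabdq_s32 */
}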
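
Each new file under gcc.target/arm/mve/intrinsics/ is a small compile-and-scan test for one
intrinsic. The exact DejaGnu directives belong to the patch itself; a sketch of the general shape
such a test usually takes (directive details may differ from the files actually added):

/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O2" } */

#include "arm_mve.h"

uint8x16_t
foo (uint8x16_t a, uint8x16_t b)
{
  return vabdq_u8 (a, b);
}

/* Check that the expected MVE instruction was emitted.  */
/* { dg-final { scan-assembler "vabd.u8" } } */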
-rw-r--r--  gcc/ChangeLog | 868
-rw-r--r--  gcc/config/arm/arm_mve.h | 3657
-rw-r--r--  gcc/config/arm/arm_mve_builtins.def | 120
-rw-r--r--  gcc/config/arm/constraints.md | 14
-rw-r--r--  gcc/config/arm/mve.md | 1205
-rw-r--r--  gcc/config/arm/predicates.md | 8
-rw-r--r--  gcc/testsuite/ChangeLog | 365
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s32.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s8.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u16.c | 22
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u32.c | 22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s8.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u16.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u32.c22
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u8.c22
367 files changed, 14153 insertions, 4 deletions
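
For illustration only (not part of the patch itself), user code exercising a few of the newly supported binary-operand intrinsics might look as follows, assuming a toolchain with this patch applied and the usual MVE options (for example -march=armv8.1-m.main+mve -mfloat-abi=hard):

    #include <arm_mve.h>

    /* Subtract two vectors, add a scalar to every lane, then do a
       predicated add across the vector lanes.  */
    uint32_t
    sub_add_reduce (uint8x16_t a, uint8x16_t b, uint8_t c, mve_pred16_t p)
    {
      uint8x16_t diff = vsubq_u8 (a, b);       /* expected to map to vsub.i8    */
      uint8x16_t sum  = vaddq_n_u8 (diff, c);  /* vector plus broadcast scalar  */
      return vaddvq_p_u8 (sum, p);             /* predicated across-vector add  */
    }
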
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index c9e6530..feecf92 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -2,6 +2,874 @@
Mihail Ionescu <mihail.ionescu@arm.com>
Srinath Parvathaneni <srinath.parvathaneni@arm.com>
+ * config/arm/arm_mve.h (vsubq_u8): Define macro.
+ (vsubq_n_u8): Likewise.
+ (vrmulhq_u8): Likewise.
+ (vrhaddq_u8): Likewise.
+ (vqsubq_u8): Likewise.
+ (vqsubq_n_u8): Likewise.
+ (vqaddq_u8): Likewise.
+ (vqaddq_n_u8): Likewise.
+ (vorrq_u8): Likewise.
+ (vornq_u8): Likewise.
+ (vmulq_u8): Likewise.
+ (vmulq_n_u8): Likewise.
+ (vmulltq_int_u8): Likewise.
+ (vmullbq_int_u8): Likewise.
+ (vmulhq_u8): Likewise.
+ (vmladavq_u8): Likewise.
+ (vminvq_u8): Likewise.
+ (vminq_u8): Likewise.
+ (vmaxvq_u8): Likewise.
+ (vmaxq_u8): Likewise.
+ (vhsubq_u8): Likewise.
+ (vhsubq_n_u8): Likewise.
+ (vhaddq_u8): Likewise.
+ (vhaddq_n_u8): Likewise.
+ (veorq_u8): Likewise.
+ (vcmpneq_n_u8): Likewise.
+ (vcmphiq_u8): Likewise.
+ (vcmphiq_n_u8): Likewise.
+ (vcmpeqq_u8): Likewise.
+ (vcmpeqq_n_u8): Likewise.
+ (vcmpcsq_u8): Likewise.
+ (vcmpcsq_n_u8): Likewise.
+ (vcaddq_rot90_u8): Likewise.
+ (vcaddq_rot270_u8): Likewise.
+ (vbicq_u8): Likewise.
+ (vandq_u8): Likewise.
+ (vaddvq_p_u8): Likewise.
+ (vaddvaq_u8): Likewise.
+ (vaddq_n_u8): Likewise.
+ (vabdq_u8): Likewise.
+ (vshlq_r_u8): Likewise.
+ (vrshlq_u8): Likewise.
+ (vrshlq_n_u8): Likewise.
+ (vqshlq_u8): Likewise.
+ (vqshlq_r_u8): Likewise.
+ (vqrshlq_u8): Likewise.
+ (vqrshlq_n_u8): Likewise.
+ (vminavq_s8): Likewise.
+ (vminaq_s8): Likewise.
+ (vmaxavq_s8): Likewise.
+ (vmaxaq_s8): Likewise.
+ (vbrsrq_n_u8): Likewise.
+ (vshlq_n_u8): Likewise.
+ (vrshrq_n_u8): Likewise.
+ (vqshlq_n_u8): Likewise.
+ (vcmpneq_n_s8): Likewise.
+ (vcmpltq_s8): Likewise.
+ (vcmpltq_n_s8): Likewise.
+ (vcmpleq_s8): Likewise.
+ (vcmpleq_n_s8): Likewise.
+ (vcmpgtq_s8): Likewise.
+ (vcmpgtq_n_s8): Likewise.
+ (vcmpgeq_s8): Likewise.
+ (vcmpgeq_n_s8): Likewise.
+ (vcmpeqq_s8): Likewise.
+ (vcmpeqq_n_s8): Likewise.
+ (vqshluq_n_s8): Likewise.
+ (vaddvq_p_s8): Likewise.
+ (vsubq_s8): Likewise.
+ (vsubq_n_s8): Likewise.
+ (vshlq_r_s8): Likewise.
+ (vrshlq_s8): Likewise.
+ (vrshlq_n_s8): Likewise.
+ (vrmulhq_s8): Likewise.
+ (vrhaddq_s8): Likewise.
+ (vqsubq_s8): Likewise.
+ (vqsubq_n_s8): Likewise.
+ (vqshlq_s8): Likewise.
+ (vqshlq_r_s8): Likewise.
+ (vqrshlq_s8): Likewise.
+ (vqrshlq_n_s8): Likewise.
+ (vqrdmulhq_s8): Likewise.
+ (vqrdmulhq_n_s8): Likewise.
+ (vqdmulhq_s8): Likewise.
+ (vqdmulhq_n_s8): Likewise.
+ (vqaddq_s8): Likewise.
+ (vqaddq_n_s8): Likewise.
+ (vorrq_s8): Likewise.
+ (vornq_s8): Likewise.
+ (vmulq_s8): Likewise.
+ (vmulq_n_s8): Likewise.
+ (vmulltq_int_s8): Likewise.
+ (vmullbq_int_s8): Likewise.
+ (vmulhq_s8): Likewise.
+ (vmlsdavxq_s8): Likewise.
+ (vmlsdavq_s8): Likewise.
+ (vmladavxq_s8): Likewise.
+ (vmladavq_s8): Likewise.
+ (vminvq_s8): Likewise.
+ (vminq_s8): Likewise.
+ (vmaxvq_s8): Likewise.
+ (vmaxq_s8): Likewise.
+ (vhsubq_s8): Likewise.
+ (vhsubq_n_s8): Likewise.
+ (vhcaddq_rot90_s8): Likewise.
+ (vhcaddq_rot270_s8): Likewise.
+ (vhaddq_s8): Likewise.
+ (vhaddq_n_s8): Likewise.
+ (veorq_s8): Likewise.
+ (vcaddq_rot90_s8): Likewise.
+ (vcaddq_rot270_s8): Likewise.
+ (vbrsrq_n_s8): Likewise.
+ (vbicq_s8): Likewise.
+ (vandq_s8): Likewise.
+ (vaddvaq_s8): Likewise.
+ (vaddq_n_s8): Likewise.
+ (vabdq_s8): Likewise.
+ (vshlq_n_s8): Likewise.
+ (vrshrq_n_s8): Likewise.
+ (vqshlq_n_s8): Likewise.
+ (vsubq_u16): Likewise.
+ (vsubq_n_u16): Likewise.
+ (vrmulhq_u16): Likewise.
+ (vrhaddq_u16): Likewise.
+ (vqsubq_u16): Likewise.
+ (vqsubq_n_u16): Likewise.
+ (vqaddq_u16): Likewise.
+ (vqaddq_n_u16): Likewise.
+ (vorrq_u16): Likewise.
+ (vornq_u16): Likewise.
+ (vmulq_u16): Likewise.
+ (vmulq_n_u16): Likewise.
+ (vmulltq_int_u16): Likewise.
+ (vmullbq_int_u16): Likewise.
+ (vmulhq_u16): Likewise.
+ (vmladavq_u16): Likewise.
+ (vminvq_u16): Likewise.
+ (vminq_u16): Likewise.
+ (vmaxvq_u16): Likewise.
+ (vmaxq_u16): Likewise.
+ (vhsubq_u16): Likewise.
+ (vhsubq_n_u16): Likewise.
+ (vhaddq_u16): Likewise.
+ (vhaddq_n_u16): Likewise.
+ (veorq_u16): Likewise.
+ (vcmpneq_n_u16): Likewise.
+ (vcmphiq_u16): Likewise.
+ (vcmphiq_n_u16): Likewise.
+ (vcmpeqq_u16): Likewise.
+ (vcmpeqq_n_u16): Likewise.
+ (vcmpcsq_u16): Likewise.
+ (vcmpcsq_n_u16): Likewise.
+ (vcaddq_rot90_u16): Likewise.
+ (vcaddq_rot270_u16): Likewise.
+ (vbicq_u16): Likewise.
+ (vandq_u16): Likewise.
+ (vaddvq_p_u16): Likewise.
+ (vaddvaq_u16): Likewise.
+ (vaddq_n_u16): Likewise.
+ (vabdq_u16): Likewise.
+ (vshlq_r_u16): Likewise.
+ (vrshlq_u16): Likewise.
+ (vrshlq_n_u16): Likewise.
+ (vqshlq_u16): Likewise.
+ (vqshlq_r_u16): Likewise.
+ (vqrshlq_u16): Likewise.
+ (vqrshlq_n_u16): Likewise.
+ (vminavq_s16): Likewise.
+ (vminaq_s16): Likewise.
+ (vmaxavq_s16): Likewise.
+ (vmaxaq_s16): Likewise.
+ (vbrsrq_n_u16): Likewise.
+ (vshlq_n_u16): Likewise.
+ (vrshrq_n_u16): Likewise.
+ (vqshlq_n_u16): Likewise.
+ (vcmpneq_n_s16): Likewise.
+ (vcmpltq_s16): Likewise.
+ (vcmpltq_n_s16): Likewise.
+ (vcmpleq_s16): Likewise.
+ (vcmpleq_n_s16): Likewise.
+ (vcmpgtq_s16): Likewise.
+ (vcmpgtq_n_s16): Likewise.
+ (vcmpgeq_s16): Likewise.
+ (vcmpgeq_n_s16): Likewise.
+ (vcmpeqq_s16): Likewise.
+ (vcmpeqq_n_s16): Likewise.
+ (vqshluq_n_s16): Likewise.
+ (vaddvq_p_s16): Likewise.
+ (vsubq_s16): Likewise.
+ (vsubq_n_s16): Likewise.
+ (vshlq_r_s16): Likewise.
+ (vrshlq_s16): Likewise.
+ (vrshlq_n_s16): Likewise.
+ (vrmulhq_s16): Likewise.
+ (vrhaddq_s16): Likewise.
+ (vqsubq_s16): Likewise.
+ (vqsubq_n_s16): Likewise.
+ (vqshlq_s16): Likewise.
+ (vqshlq_r_s16): Likewise.
+ (vqrshlq_s16): Likewise.
+ (vqrshlq_n_s16): Likewise.
+ (vqrdmulhq_s16): Likewise.
+ (vqrdmulhq_n_s16): Likewise.
+ (vqdmulhq_s16): Likewise.
+ (vqdmulhq_n_s16): Likewise.
+ (vqaddq_s16): Likewise.
+ (vqaddq_n_s16): Likewise.
+ (vorrq_s16): Likewise.
+ (vornq_s16): Likewise.
+ (vmulq_s16): Likewise.
+ (vmulq_n_s16): Likewise.
+ (vmulltq_int_s16): Likewise.
+ (vmullbq_int_s16): Likewise.
+ (vmulhq_s16): Likewise.
+ (vmlsdavxq_s16): Likewise.
+ (vmlsdavq_s16): Likewise.
+ (vmladavxq_s16): Likewise.
+ (vmladavq_s16): Likewise.
+ (vminvq_s16): Likewise.
+ (vminq_s16): Likewise.
+ (vmaxvq_s16): Likewise.
+ (vmaxq_s16): Likewise.
+ (vhsubq_s16): Likewise.
+ (vhsubq_n_s16): Likewise.
+ (vhcaddq_rot90_s16): Likewise.
+ (vhcaddq_rot270_s16): Likewise.
+ (vhaddq_s16): Likewise.
+ (vhaddq_n_s16): Likewise.
+ (veorq_s16): Likewise.
+ (vcaddq_rot90_s16): Likewise.
+ (vcaddq_rot270_s16): Likewise.
+ (vbrsrq_n_s16): Likewise.
+ (vbicq_s16): Likewise.
+ (vandq_s16): Likewise.
+ (vaddvaq_s16): Likewise.
+ (vaddq_n_s16): Likewise.
+ (vabdq_s16): Likewise.
+ (vshlq_n_s16): Likewise.
+ (vrshrq_n_s16): Likewise.
+ (vqshlq_n_s16): Likewise.
+ (vsubq_u32): Likewise.
+ (vsubq_n_u32): Likewise.
+ (vrmulhq_u32): Likewise.
+ (vrhaddq_u32): Likewise.
+ (vqsubq_u32): Likewise.
+ (vqsubq_n_u32): Likewise.
+ (vqaddq_u32): Likewise.
+ (vqaddq_n_u32): Likewise.
+ (vorrq_u32): Likewise.
+ (vornq_u32): Likewise.
+ (vmulq_u32): Likewise.
+ (vmulq_n_u32): Likewise.
+ (vmulltq_int_u32): Likewise.
+ (vmullbq_int_u32): Likewise.
+ (vmulhq_u32): Likewise.
+ (vmladavq_u32): Likewise.
+ (vminvq_u32): Likewise.
+ (vminq_u32): Likewise.
+ (vmaxvq_u32): Likewise.
+ (vmaxq_u32): Likewise.
+ (vhsubq_u32): Likewise.
+ (vhsubq_n_u32): Likewise.
+ (vhaddq_u32): Likewise.
+ (vhaddq_n_u32): Likewise.
+ (veorq_u32): Likewise.
+ (vcmpneq_n_u32): Likewise.
+ (vcmphiq_u32): Likewise.
+ (vcmphiq_n_u32): Likewise.
+ (vcmpeqq_u32): Likewise.
+ (vcmpeqq_n_u32): Likewise.
+ (vcmpcsq_u32): Likewise.
+ (vcmpcsq_n_u32): Likewise.
+ (vcaddq_rot90_u32): Likewise.
+ (vcaddq_rot270_u32): Likewise.
+ (vbicq_u32): Likewise.
+ (vandq_u32): Likewise.
+ (vaddvq_p_u32): Likewise.
+ (vaddvaq_u32): Likewise.
+ (vaddq_n_u32): Likewise.
+ (vabdq_u32): Likewise.
+ (vshlq_r_u32): Likewise.
+ (vrshlq_u32): Likewise.
+ (vrshlq_n_u32): Likewise.
+ (vqshlq_u32): Likewise.
+ (vqshlq_r_u32): Likewise.
+ (vqrshlq_u32): Likewise.
+ (vqrshlq_n_u32): Likewise.
+ (vminavq_s32): Likewise.
+ (vminaq_s32): Likewise.
+ (vmaxavq_s32): Likewise.
+ (vmaxaq_s32): Likewise.
+ (vbrsrq_n_u32): Likewise.
+ (vshlq_n_u32): Likewise.
+ (vrshrq_n_u32): Likewise.
+ (vqshlq_n_u32): Likewise.
+ (vcmpneq_n_s32): Likewise.
+ (vcmpltq_s32): Likewise.
+ (vcmpltq_n_s32): Likewise.
+ (vcmpleq_s32): Likewise.
+ (vcmpleq_n_s32): Likewise.
+ (vcmpgtq_s32): Likewise.
+ (vcmpgtq_n_s32): Likewise.
+ (vcmpgeq_s32): Likewise.
+ (vcmpgeq_n_s32): Likewise.
+ (vcmpeqq_s32): Likewise.
+ (vcmpeqq_n_s32): Likewise.
+ (vqshluq_n_s32): Likewise.
+ (vaddvq_p_s32): Likewise.
+ (vsubq_s32): Likewise.
+ (vsubq_n_s32): Likewise.
+ (vshlq_r_s32): Likewise.
+ (vrshlq_s32): Likewise.
+ (vrshlq_n_s32): Likewise.
+ (vrmulhq_s32): Likewise.
+ (vrhaddq_s32): Likewise.
+ (vqsubq_s32): Likewise.
+ (vqsubq_n_s32): Likewise.
+ (vqshlq_s32): Likewise.
+ (vqshlq_r_s32): Likewise.
+ (vqrshlq_s32): Likewise.
+ (vqrshlq_n_s32): Likewise.
+ (vqrdmulhq_s32): Likewise.
+ (vqrdmulhq_n_s32): Likewise.
+ (vqdmulhq_s32): Likewise.
+ (vqdmulhq_n_s32): Likewise.
+ (vqaddq_s32): Likewise.
+ (vqaddq_n_s32): Likewise.
+ (vorrq_s32): Likewise.
+ (vornq_s32): Likewise.
+ (vmulq_s32): Likewise.
+ (vmulq_n_s32): Likewise.
+ (vmulltq_int_s32): Likewise.
+ (vmullbq_int_s32): Likewise.
+ (vmulhq_s32): Likewise.
+ (vmlsdavxq_s32): Likewise.
+ (vmlsdavq_s32): Likewise.
+ (vmladavxq_s32): Likewise.
+ (vmladavq_s32): Likewise.
+ (vminvq_s32): Likewise.
+ (vminq_s32): Likewise.
+ (vmaxvq_s32): Likewise.
+ (vmaxq_s32): Likewise.
+ (vhsubq_s32): Likewise.
+ (vhsubq_n_s32): Likewise.
+ (vhcaddq_rot90_s32): Likewise.
+ (vhcaddq_rot270_s32): Likewise.
+ (vhaddq_s32): Likewise.
+ (vhaddq_n_s32): Likewise.
+ (veorq_s32): Likewise.
+ (vcaddq_rot90_s32): Likewise.
+ (vcaddq_rot270_s32): Likewise.
+ (vbrsrq_n_s32): Likewise.
+ (vbicq_s32): Likewise.
+ (vandq_s32): Likewise.
+ (vaddvaq_s32): Likewise.
+ (vaddq_n_s32): Likewise.
+ (vabdq_s32): Likewise.
+ (vshlq_n_s32): Likewise.
+ (vrshrq_n_s32): Likewise.
+ (vqshlq_n_s32): Likewise.
+ (__arm_vsubq_u8): Define intrinsic.
+ (__arm_vsubq_n_u8): Likewise.
+ (__arm_vrmulhq_u8): Likewise.
+ (__arm_vrhaddq_u8): Likewise.
+ (__arm_vqsubq_u8): Likewise.
+ (__arm_vqsubq_n_u8): Likewise.
+ (__arm_vqaddq_u8): Likewise.
+ (__arm_vqaddq_n_u8): Likewise.
+ (__arm_vorrq_u8): Likewise.
+ (__arm_vornq_u8): Likewise.
+ (__arm_vmulq_u8): Likewise.
+ (__arm_vmulq_n_u8): Likewise.
+ (__arm_vmulltq_int_u8): Likewise.
+ (__arm_vmullbq_int_u8): Likewise.
+ (__arm_vmulhq_u8): Likewise.
+ (__arm_vmladavq_u8): Likewise.
+ (__arm_vminvq_u8): Likewise.
+ (__arm_vminq_u8): Likewise.
+ (__arm_vmaxvq_u8): Likewise.
+ (__arm_vmaxq_u8): Likewise.
+ (__arm_vhsubq_u8): Likewise.
+ (__arm_vhsubq_n_u8): Likewise.
+ (__arm_vhaddq_u8): Likewise.
+ (__arm_vhaddq_n_u8): Likewise.
+ (__arm_veorq_u8): Likewise.
+ (__arm_vcmpneq_n_u8): Likewise.
+ (__arm_vcmphiq_u8): Likewise.
+ (__arm_vcmphiq_n_u8): Likewise.
+ (__arm_vcmpeqq_u8): Likewise.
+ (__arm_vcmpeqq_n_u8): Likewise.
+ (__arm_vcmpcsq_u8): Likewise.
+ (__arm_vcmpcsq_n_u8): Likewise.
+ (__arm_vcaddq_rot90_u8): Likewise.
+ (__arm_vcaddq_rot270_u8): Likewise.
+ (__arm_vbicq_u8): Likewise.
+ (__arm_vandq_u8): Likewise.
+ (__arm_vaddvq_p_u8): Likewise.
+ (__arm_vaddvaq_u8): Likewise.
+ (__arm_vaddq_n_u8): Likewise.
+ (__arm_vabdq_u8): Likewise.
+ (__arm_vshlq_r_u8): Likewise.
+ (__arm_vrshlq_u8): Likewise.
+ (__arm_vrshlq_n_u8): Likewise.
+ (__arm_vqshlq_u8): Likewise.
+ (__arm_vqshlq_r_u8): Likewise.
+ (__arm_vqrshlq_u8): Likewise.
+ (__arm_vqrshlq_n_u8): Likewise.
+ (__arm_vminavq_s8): Likewise.
+ (__arm_vminaq_s8): Likewise.
+ (__arm_vmaxavq_s8): Likewise.
+ (__arm_vmaxaq_s8): Likewise.
+ (__arm_vbrsrq_n_u8): Likewise.
+ (__arm_vshlq_n_u8): Likewise.
+ (__arm_vrshrq_n_u8): Likewise.
+ (__arm_vqshlq_n_u8): Likewise.
+ (__arm_vcmpneq_n_s8): Likewise.
+ (__arm_vcmpltq_s8): Likewise.
+ (__arm_vcmpltq_n_s8): Likewise.
+ (__arm_vcmpleq_s8): Likewise.
+ (__arm_vcmpleq_n_s8): Likewise.
+ (__arm_vcmpgtq_s8): Likewise.
+ (__arm_vcmpgtq_n_s8): Likewise.
+ (__arm_vcmpgeq_s8): Likewise.
+ (__arm_vcmpgeq_n_s8): Likewise.
+ (__arm_vcmpeqq_s8): Likewise.
+ (__arm_vcmpeqq_n_s8): Likewise.
+ (__arm_vqshluq_n_s8): Likewise.
+ (__arm_vaddvq_p_s8): Likewise.
+ (__arm_vsubq_s8): Likewise.
+ (__arm_vsubq_n_s8): Likewise.
+ (__arm_vshlq_r_s8): Likewise.
+ (__arm_vrshlq_s8): Likewise.
+ (__arm_vrshlq_n_s8): Likewise.
+ (__arm_vrmulhq_s8): Likewise.
+ (__arm_vrhaddq_s8): Likewise.
+ (__arm_vqsubq_s8): Likewise.
+ (__arm_vqsubq_n_s8): Likewise.
+ (__arm_vqshlq_s8): Likewise.
+ (__arm_vqshlq_r_s8): Likewise.
+ (__arm_vqrshlq_s8): Likewise.
+ (__arm_vqrshlq_n_s8): Likewise.
+ (__arm_vqrdmulhq_s8): Likewise.
+ (__arm_vqrdmulhq_n_s8): Likewise.
+ (__arm_vqdmulhq_s8): Likewise.
+ (__arm_vqdmulhq_n_s8): Likewise.
+ (__arm_vqaddq_s8): Likewise.
+ (__arm_vqaddq_n_s8): Likewise.
+ (__arm_vorrq_s8): Likewise.
+ (__arm_vornq_s8): Likewise.
+ (__arm_vmulq_s8): Likewise.
+ (__arm_vmulq_n_s8): Likewise.
+ (__arm_vmulltq_int_s8): Likewise.
+ (__arm_vmullbq_int_s8): Likewise.
+ (__arm_vmulhq_s8): Likewise.
+ (__arm_vmlsdavxq_s8): Likewise.
+ (__arm_vmlsdavq_s8): Likewise.
+ (__arm_vmladavxq_s8): Likewise.
+ (__arm_vmladavq_s8): Likewise.
+ (__arm_vminvq_s8): Likewise.
+ (__arm_vminq_s8): Likewise.
+ (__arm_vmaxvq_s8): Likewise.
+ (__arm_vmaxq_s8): Likewise.
+ (__arm_vhsubq_s8): Likewise.
+ (__arm_vhsubq_n_s8): Likewise.
+ (__arm_vhcaddq_rot90_s8): Likewise.
+ (__arm_vhcaddq_rot270_s8): Likewise.
+ (__arm_vhaddq_s8): Likewise.
+ (__arm_vhaddq_n_s8): Likewise.
+ (__arm_veorq_s8): Likewise.
+ (__arm_vcaddq_rot90_s8): Likewise.
+ (__arm_vcaddq_rot270_s8): Likewise.
+ (__arm_vbrsrq_n_s8): Likewise.
+ (__arm_vbicq_s8): Likewise.
+ (__arm_vandq_s8): Likewise.
+ (__arm_vaddvaq_s8): Likewise.
+ (__arm_vaddq_n_s8): Likewise.
+ (__arm_vabdq_s8): Likewise.
+ (__arm_vshlq_n_s8): Likewise.
+ (__arm_vrshrq_n_s8): Likewise.
+ (__arm_vqshlq_n_s8): Likewise.
+ (__arm_vsubq_u16): Likewise.
+ (__arm_vsubq_n_u16): Likewise.
+ (__arm_vrmulhq_u16): Likewise.
+ (__arm_vrhaddq_u16): Likewise.
+ (__arm_vqsubq_u16): Likewise.
+ (__arm_vqsubq_n_u16): Likewise.
+ (__arm_vqaddq_u16): Likewise.
+ (__arm_vqaddq_n_u16): Likewise.
+ (__arm_vorrq_u16): Likewise.
+ (__arm_vornq_u16): Likewise.
+ (__arm_vmulq_u16): Likewise.
+ (__arm_vmulq_n_u16): Likewise.
+ (__arm_vmulltq_int_u16): Likewise.
+ (__arm_vmullbq_int_u16): Likewise.
+ (__arm_vmulhq_u16): Likewise.
+ (__arm_vmladavq_u16): Likewise.
+ (__arm_vminvq_u16): Likewise.
+ (__arm_vminq_u16): Likewise.
+ (__arm_vmaxvq_u16): Likewise.
+ (__arm_vmaxq_u16): Likewise.
+ (__arm_vhsubq_u16): Likewise.
+ (__arm_vhsubq_n_u16): Likewise.
+ (__arm_vhaddq_u16): Likewise.
+ (__arm_vhaddq_n_u16): Likewise.
+ (__arm_veorq_u16): Likewise.
+ (__arm_vcmpneq_n_u16): Likewise.
+ (__arm_vcmphiq_u16): Likewise.
+ (__arm_vcmphiq_n_u16): Likewise.
+ (__arm_vcmpeqq_u16): Likewise.
+ (__arm_vcmpeqq_n_u16): Likewise.
+ (__arm_vcmpcsq_u16): Likewise.
+ (__arm_vcmpcsq_n_u16): Likewise.
+ (__arm_vcaddq_rot90_u16): Likewise.
+ (__arm_vcaddq_rot270_u16): Likewise.
+ (__arm_vbicq_u16): Likewise.
+ (__arm_vandq_u16): Likewise.
+ (__arm_vaddvq_p_u16): Likewise.
+ (__arm_vaddvaq_u16): Likewise.
+ (__arm_vaddq_n_u16): Likewise.
+ (__arm_vabdq_u16): Likewise.
+ (__arm_vshlq_r_u16): Likewise.
+ (__arm_vrshlq_u16): Likewise.
+ (__arm_vrshlq_n_u16): Likewise.
+ (__arm_vqshlq_u16): Likewise.
+ (__arm_vqshlq_r_u16): Likewise.
+ (__arm_vqrshlq_u16): Likewise.
+ (__arm_vqrshlq_n_u16): Likewise.
+ (__arm_vminavq_s16): Likewise.
+ (__arm_vminaq_s16): Likewise.
+ (__arm_vmaxavq_s16): Likewise.
+ (__arm_vmaxaq_s16): Likewise.
+ (__arm_vbrsrq_n_u16): Likewise.
+ (__arm_vshlq_n_u16): Likewise.
+ (__arm_vrshrq_n_u16): Likewise.
+ (__arm_vqshlq_n_u16): Likewise.
+ (__arm_vcmpneq_n_s16): Likewise.
+ (__arm_vcmpltq_s16): Likewise.
+ (__arm_vcmpltq_n_s16): Likewise.
+ (__arm_vcmpleq_s16): Likewise.
+ (__arm_vcmpleq_n_s16): Likewise.
+ (__arm_vcmpgtq_s16): Likewise.
+ (__arm_vcmpgtq_n_s16): Likewise.
+ (__arm_vcmpgeq_s16): Likewise.
+ (__arm_vcmpgeq_n_s16): Likewise.
+ (__arm_vcmpeqq_s16): Likewise.
+ (__arm_vcmpeqq_n_s16): Likewise.
+ (__arm_vqshluq_n_s16): Likewise.
+ (__arm_vaddvq_p_s16): Likewise.
+ (__arm_vsubq_s16): Likewise.
+ (__arm_vsubq_n_s16): Likewise.
+ (__arm_vshlq_r_s16): Likewise.
+ (__arm_vrshlq_s16): Likewise.
+ (__arm_vrshlq_n_s16): Likewise.
+ (__arm_vrmulhq_s16): Likewise.
+ (__arm_vrhaddq_s16): Likewise.
+ (__arm_vqsubq_s16): Likewise.
+ (__arm_vqsubq_n_s16): Likewise.
+ (__arm_vqshlq_s16): Likewise.
+ (__arm_vqshlq_r_s16): Likewise.
+ (__arm_vqrshlq_s16): Likewise.
+ (__arm_vqrshlq_n_s16): Likewise.
+ (__arm_vqrdmulhq_s16): Likewise.
+ (__arm_vqrdmulhq_n_s16): Likewise.
+ (__arm_vqdmulhq_s16): Likewise.
+ (__arm_vqdmulhq_n_s16): Likewise.
+ (__arm_vqaddq_s16): Likewise.
+ (__arm_vqaddq_n_s16): Likewise.
+ (__arm_vorrq_s16): Likewise.
+ (__arm_vornq_s16): Likewise.
+ (__arm_vmulq_s16): Likewise.
+ (__arm_vmulq_n_s16): Likewise.
+ (__arm_vmulltq_int_s16): Likewise.
+ (__arm_vmullbq_int_s16): Likewise.
+ (__arm_vmulhq_s16): Likewise.
+ (__arm_vmlsdavxq_s16): Likewise.
+ (__arm_vmlsdavq_s16): Likewise.
+ (__arm_vmladavxq_s16): Likewise.
+ (__arm_vmladavq_s16): Likewise.
+ (__arm_vminvq_s16): Likewise.
+ (__arm_vminq_s16): Likewise.
+ (__arm_vmaxvq_s16): Likewise.
+ (__arm_vmaxq_s16): Likewise.
+ (__arm_vhsubq_s16): Likewise.
+ (__arm_vhsubq_n_s16): Likewise.
+ (__arm_vhcaddq_rot90_s16): Likewise.
+ (__arm_vhcaddq_rot270_s16): Likewise.
+ (__arm_vhaddq_s16): Likewise.
+ (__arm_vhaddq_n_s16): Likewise.
+ (__arm_veorq_s16): Likewise.
+ (__arm_vcaddq_rot90_s16): Likewise.
+ (__arm_vcaddq_rot270_s16): Likewise.
+ (__arm_vbrsrq_n_s16): Likewise.
+ (__arm_vbicq_s16): Likewise.
+ (__arm_vandq_s16): Likewise.
+ (__arm_vaddvaq_s16): Likewise.
+ (__arm_vaddq_n_s16): Likewise.
+ (__arm_vabdq_s16): Likewise.
+ (__arm_vshlq_n_s16): Likewise.
+ (__arm_vrshrq_n_s16): Likewise.
+ (__arm_vqshlq_n_s16): Likewise.
+ (__arm_vsubq_u32): Likewise.
+ (__arm_vsubq_n_u32): Likewise.
+ (__arm_vrmulhq_u32): Likewise.
+ (__arm_vrhaddq_u32): Likewise.
+ (__arm_vqsubq_u32): Likewise.
+ (__arm_vqsubq_n_u32): Likewise.
+ (__arm_vqaddq_u32): Likewise.
+ (__arm_vqaddq_n_u32): Likewise.
+ (__arm_vorrq_u32): Likewise.
+ (__arm_vornq_u32): Likewise.
+ (__arm_vmulq_u32): Likewise.
+ (__arm_vmulq_n_u32): Likewise.
+ (__arm_vmulltq_int_u32): Likewise.
+ (__arm_vmullbq_int_u32): Likewise.
+ (__arm_vmulhq_u32): Likewise.
+ (__arm_vmladavq_u32): Likewise.
+ (__arm_vminvq_u32): Likewise.
+ (__arm_vminq_u32): Likewise.
+ (__arm_vmaxvq_u32): Likewise.
+ (__arm_vmaxq_u32): Likewise.
+ (__arm_vhsubq_u32): Likewise.
+ (__arm_vhsubq_n_u32): Likewise.
+ (__arm_vhaddq_u32): Likewise.
+ (__arm_vhaddq_n_u32): Likewise.
+ (__arm_veorq_u32): Likewise.
+ (__arm_vcmpneq_n_u32): Likewise.
+ (__arm_vcmphiq_u32): Likewise.
+ (__arm_vcmphiq_n_u32): Likewise.
+ (__arm_vcmpeqq_u32): Likewise.
+ (__arm_vcmpeqq_n_u32): Likewise.
+ (__arm_vcmpcsq_u32): Likewise.
+ (__arm_vcmpcsq_n_u32): Likewise.
+ (__arm_vcaddq_rot90_u32): Likewise.
+ (__arm_vcaddq_rot270_u32): Likewise.
+ (__arm_vbicq_u32): Likewise.
+ (__arm_vandq_u32): Likewise.
+ (__arm_vaddvq_p_u32): Likewise.
+ (__arm_vaddvaq_u32): Likewise.
+ (__arm_vaddq_n_u32): Likewise.
+ (__arm_vabdq_u32): Likewise.
+ (__arm_vshlq_r_u32): Likewise.
+ (__arm_vrshlq_u32): Likewise.
+ (__arm_vrshlq_n_u32): Likewise.
+ (__arm_vqshlq_u32): Likewise.
+ (__arm_vqshlq_r_u32): Likewise.
+ (__arm_vqrshlq_u32): Likewise.
+ (__arm_vqrshlq_n_u32): Likewise.
+ (__arm_vminavq_s32): Likewise.
+ (__arm_vminaq_s32): Likewise.
+ (__arm_vmaxavq_s32): Likewise.
+ (__arm_vmaxaq_s32): Likewise.
+ (__arm_vbrsrq_n_u32): Likewise.
+ (__arm_vshlq_n_u32): Likewise.
+ (__arm_vrshrq_n_u32): Likewise.
+ (__arm_vqshlq_n_u32): Likewise.
+ (__arm_vcmpneq_n_s32): Likewise.
+ (__arm_vcmpltq_s32): Likewise.
+ (__arm_vcmpltq_n_s32): Likewise.
+ (__arm_vcmpleq_s32): Likewise.
+ (__arm_vcmpleq_n_s32): Likewise.
+ (__arm_vcmpgtq_s32): Likewise.
+ (__arm_vcmpgtq_n_s32): Likewise.
+ (__arm_vcmpgeq_s32): Likewise.
+ (__arm_vcmpgeq_n_s32): Likewise.
+ (__arm_vcmpeqq_s32): Likewise.
+ (__arm_vcmpeqq_n_s32): Likewise.
+ (__arm_vqshluq_n_s32): Likewise.
+ (__arm_vaddvq_p_s32): Likewise.
+ (__arm_vsubq_s32): Likewise.
+ (__arm_vsubq_n_s32): Likewise.
+ (__arm_vshlq_r_s32): Likewise.
+ (__arm_vrshlq_s32): Likewise.
+ (__arm_vrshlq_n_s32): Likewise.
+ (__arm_vrmulhq_s32): Likewise.
+ (__arm_vrhaddq_s32): Likewise.
+ (__arm_vqsubq_s32): Likewise.
+ (__arm_vqsubq_n_s32): Likewise.
+ (__arm_vqshlq_s32): Likewise.
+ (__arm_vqshlq_r_s32): Likewise.
+ (__arm_vqrshlq_s32): Likewise.
+ (__arm_vqrshlq_n_s32): Likewise.
+ (__arm_vqrdmulhq_s32): Likewise.
+ (__arm_vqrdmulhq_n_s32): Likewise.
+ (__arm_vqdmulhq_s32): Likewise.
+ (__arm_vqdmulhq_n_s32): Likewise.
+ (__arm_vqaddq_s32): Likewise.
+ (__arm_vqaddq_n_s32): Likewise.
+ (__arm_vorrq_s32): Likewise.
+ (__arm_vornq_s32): Likewise.
+ (__arm_vmulq_s32): Likewise.
+ (__arm_vmulq_n_s32): Likewise.
+ (__arm_vmulltq_int_s32): Likewise.
+ (__arm_vmullbq_int_s32): Likewise.
+ (__arm_vmulhq_s32): Likewise.
+ (__arm_vmlsdavxq_s32): Likewise.
+ (__arm_vmlsdavq_s32): Likewise.
+ (__arm_vmladavxq_s32): Likewise.
+ (__arm_vmladavq_s32): Likewise.
+ (__arm_vminvq_s32): Likewise.
+ (__arm_vminq_s32): Likewise.
+ (__arm_vmaxvq_s32): Likewise.
+ (__arm_vmaxq_s32): Likewise.
+ (__arm_vhsubq_s32): Likewise.
+ (__arm_vhsubq_n_s32): Likewise.
+ (__arm_vhcaddq_rot90_s32): Likewise.
+ (__arm_vhcaddq_rot270_s32): Likewise.
+ (__arm_vhaddq_s32): Likewise.
+ (__arm_vhaddq_n_s32): Likewise.
+ (__arm_veorq_s32): Likewise.
+ (__arm_vcaddq_rot90_s32): Likewise.
+ (__arm_vcaddq_rot270_s32): Likewise.
+ (__arm_vbrsrq_n_s32): Likewise.
+ (__arm_vbicq_s32): Likewise.
+ (__arm_vandq_s32): Likewise.
+ (__arm_vaddvaq_s32): Likewise.
+ (__arm_vaddq_n_s32): Likewise.
+ (__arm_vabdq_s32): Likewise.
+ (__arm_vshlq_n_s32): Likewise.
+ (__arm_vrshrq_n_s32): Likewise.
+ (__arm_vqshlq_n_s32): Likewise.
+ (vsubq): Define polymorphic variant.
+ (vsubq_n): Likewise.
+ (vshlq_r): Likewise.
+ (vrshlq_n): Likewise.
+ (vrshlq): Likewise.
+ (vrmulhq): Likewise.
+ (vrhaddq): Likewise.
+ (vqsubq_n): Likewise.
+ (vqsubq): Likewise.
+ (vqshlq): Likewise.
+ (vqshlq_r): Likewise.
+ (vqshluq): Likewise.
+ (vrshrq_n): Likewise.
+ (vshlq_n): Likewise.
+ (vqshluq_n): Likewise.
+ (vqshlq_n): Likewise.
+ (vqrshlq_n): Likewise.
+ (vqrshlq): Likewise.
+ (vqrdmulhq_n): Likewise.
+ (vqrdmulhq): Likewise.
+ (vqdmulhq_n): Likewise.
+ (vqdmulhq): Likewise.
+ (vqaddq_n): Likewise.
+ (vqaddq): Likewise.
+ (vorrq_n): Likewise.
+ (vorrq): Likewise.
+ (vornq): Likewise.
+ (vmulq_n): Likewise.
+ (vmulq): Likewise.
+ (vmulltq_int): Likewise.
+ (vmullbq_int): Likewise.
+ (vmulhq): Likewise.
+ (vminq): Likewise.
+ (vminaq): Likewise.
+ (vmaxq): Likewise.
+ (vmaxaq): Likewise.
+ (vhsubq_n): Likewise.
+ (vhsubq): Likewise.
+ (vhcaddq_rot90): Likewise.
+ (vhcaddq_rot270): Likewise.
+ (vhaddq_n): Likewise.
+ (vhaddq): Likewise.
+ (veorq): Likewise.
+ (vcaddq_rot90): Likewise.
+ (vcaddq_rot270): Likewise.
+ (vbrsrq_n): Likewise.
+ (vbicq_n): Likewise.
+ (vbicq): Likewise.
+ (vaddq): Likewise.
+ (vaddq_n): Likewise.
+ (vandq): Likewise.
+ (vabdq): Likewise.
+ * config/arm/arm_mve_builtins.def (BINOP_NONE_NONE_IMM): Use it.
+ (BINOP_NONE_NONE_NONE): Likewise.
+ (BINOP_NONE_NONE_UNONE): Likewise.
+ (BINOP_UNONE_NONE_IMM): Likewise.
+ (BINOP_UNONE_NONE_NONE): Likewise.
+ (BINOP_UNONE_UNONE_IMM): Likewise.
+ (BINOP_UNONE_UNONE_NONE): Likewise.
+ (BINOP_UNONE_UNONE_UNONE): Likewise.
+ * config/arm/constraints.md (Ra): Define constraint to check that the
+ constant is in the range 0 to 7.
+ (Rg): Define constraint to check that the constant is one of 1, 2, 4
+ and 8.
+ * config/arm/mve.md (mve_vabdq_<supf>): Define RTL pattern.
+ (mve_vaddq_n_<supf>): Likewise.
+ (mve_vaddvaq_<supf>): Likewise.
+ (mve_vaddvq_p_<supf>): Likewise.
+ (mve_vandq_<supf>): Likewise.
+ (mve_vbicq_<supf>): Likewise.
+ (mve_vbrsrq_n_<supf>): Likewise.
+ (mve_vcaddq_rot270_<supf>): Likewise.
+ (mve_vcaddq_rot90_<supf>): Likewise.
+ (mve_vcmpcsq_n_u): Likewise.
+ (mve_vcmpcsq_u): Likewise.
+ (mve_vcmpeqq_n_<supf>): Likewise.
+ (mve_vcmpeqq_<supf>): Likewise.
+ (mve_vcmpgeq_n_s): Likewise.
+ (mve_vcmpgeq_s): Likewise.
+ (mve_vcmpgtq_n_s): Likewise.
+ (mve_vcmpgtq_s): Likewise.
+ (mve_vcmphiq_n_u): Likewise.
+ (mve_vcmphiq_u): Likewise.
+ (mve_vcmpleq_n_s): Likewise.
+ (mve_vcmpleq_s): Likewise.
+ (mve_vcmpltq_n_s): Likewise.
+ (mve_vcmpltq_s): Likewise.
+ (mve_vcmpneq_n_<supf>): Likewise.
+ (mve_vddupq_n_u): Likewise.
+ (mve_veorq_<supf>): Likewise.
+ (mve_vhaddq_n_<supf>): Likewise.
+ (mve_vhaddq_<supf>): Likewise.
+ (mve_vhcaddq_rot270_s): Likewise.
+ (mve_vhcaddq_rot90_s): Likewise.
+ (mve_vhsubq_n_<supf>): Likewise.
+ (mve_vhsubq_<supf>): Likewise.
+ (mve_vidupq_n_u): Likewise.
+ (mve_vmaxaq_s): Likewise.
+ (mve_vmaxavq_s): Likewise.
+ (mve_vmaxq_<supf>): Likewise.
+ (mve_vmaxvq_<supf>): Likewise.
+ (mve_vminaq_s): Likewise.
+ (mve_vminavq_s): Likewise.
+ (mve_vminq_<supf>): Likewise.
+ (mve_vminvq_<supf>): Likewise.
+ (mve_vmladavq_<supf>): Likewise.
+ (mve_vmladavxq_s): Likewise.
+ (mve_vmlsdavq_s): Likewise.
+ (mve_vmlsdavxq_s): Likewise.
+ (mve_vmulhq_<supf>): Likewise.
+ (mve_vmullbq_int_<supf>): Likewise.
+ (mve_vmulltq_int_<supf>): Likewise.
+ (mve_vmulq_n_<supf>): Likewise.
+ (mve_vmulq_<supf>): Likewise.
+ (mve_vornq_<supf>): Likewise.
+ (mve_vorrq_<supf>): Likewise.
+ (mve_vqaddq_n_<supf>): Likewise.
+ (mve_vqaddq_<supf>): Likewise.
+ (mve_vqdmulhq_n_s): Likewise.
+ (mve_vqdmulhq_s): Likewise.
+ (mve_vqrdmulhq_n_s): Likewise.
+ (mve_vqrdmulhq_s): Likewise.
+ (mve_vqrshlq_n_<supf>): Likewise.
+ (mve_vqrshlq_<supf>): Likewise.
+ (mve_vqshlq_n_<supf>): Likewise.
+ (mve_vqshlq_r_<supf>): Likewise.
+ (mve_vqshlq_<supf>): Likewise.
+ (mve_vqshluq_n_s): Likewise.
+ (mve_vqsubq_n_<supf>): Likewise.
+ (mve_vqsubq_<supf>): Likewise.
+ (mve_vrhaddq_<supf>): Likewise.
+ (mve_vrmulhq_<supf>): Likewise.
+ (mve_vrshlq_n_<supf>): Likewise.
+ (mve_vrshlq_<supf>): Likewise.
+ (mve_vrshrq_n_<supf>): Likewise.
+ (mve_vshlq_n_<supf>): Likewise.
+ (mve_vshlq_r_<supf>): Likewise.
+ (mve_vsubq_n_<supf>): Likewise.
+ (mve_vsubq_<supf>): Likewise.
+ * config/arm/predicates.md (mve_imm_7): Define predicate to check
+ that the constant matches constraint Ra.
+ (mve_imm_selective_upto_8): Define predicate to check that the
+ constant matches constraint Rg.
+
+2020-03-17 Andre Vieira <andre.simoesdiasvieira@arm.com>
+ Mihail Ionescu <mihail.ionescu@arm.com>
+ Srinath Parvathaneni <srinath.parvathaneni@arm.com>
+
* config/arm/arm-builtins.c (BINOP_NONE_NONE_UNONE_QUALIFIERS): Define
qualifier for binary operands.
(BINOP_UNONE_NONE_NONE_QUALIFIERS): Likewise.
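
The ChangeLog above also records polymorphic variants (vsubq, vqaddq, vabdq and so on); in arm_mve.h these are macros that pick the type-suffixed intrinsic from the argument types. As a rough sketch of the idea only (the patch's actual macros go through the header's internal type-dispatch helpers, not this exact form), a C11 _Generic based dispatch over the unsigned vsubq variants could be written like this, with arm_mve.h included:

    /* Hypothetical macro, for illustration only: pick the type-suffixed
       inline function from the first argument's vector type.  */
    #define my_vsubq(a, b)                    \
      _Generic ((a),                          \
        uint8x16_t:  __arm_vsubq_u8,          \
        uint16x8_t:  __arm_vsubq_u16,         \
        uint32x4_t:  __arm_vsubq_u32) ((a), (b))

With that definition, my_vsubq (x, y) on two uint16x8_t values resolves to __arm_vsubq_u16. The immediate forms follow their ACLE descriptions: for the 8-bit shift-by-immediate intrinsics such as vshlq_n_u8 and vqshluq_n_s8 the shift count must be a compile-time constant in the range 0 to 7, which is the range the new Ra constraint (and the mve_imm_7 predicate that matches it) describes, while the 1, 2, 4 and 8 values accepted by Rg and mve_imm_selective_upto_8 match the increments accepted by the vddup/vidup patterns listed above.
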
diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
index ef0d2ac..eb81a02 100644
--- a/gcc/config/arm/arm_mve.h
+++ b/gcc/config/arm/arm_mve.h
@@ -239,6 +239,366 @@ typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
#define vshlq_u8(__a, __b) __arm_vshlq_u8(__a, __b)
#define vshlq_u16(__a, __b) __arm_vshlq_u16(__a, __b)
#define vshlq_u32(__a, __b) __arm_vshlq_u32(__a, __b)
+#define vsubq_u8(__a, __b) __arm_vsubq_u8(__a, __b)
+#define vsubq_n_u8(__a, __b) __arm_vsubq_n_u8(__a, __b)
+#define vrmulhq_u8(__a, __b) __arm_vrmulhq_u8(__a, __b)
+#define vrhaddq_u8(__a, __b) __arm_vrhaddq_u8(__a, __b)
+#define vqsubq_u8(__a, __b) __arm_vqsubq_u8(__a, __b)
+#define vqsubq_n_u8(__a, __b) __arm_vqsubq_n_u8(__a, __b)
+#define vqaddq_u8(__a, __b) __arm_vqaddq_u8(__a, __b)
+#define vqaddq_n_u8(__a, __b) __arm_vqaddq_n_u8(__a, __b)
+#define vorrq_u8(__a, __b) __arm_vorrq_u8(__a, __b)
+#define vornq_u8(__a, __b) __arm_vornq_u8(__a, __b)
+#define vmulq_u8(__a, __b) __arm_vmulq_u8(__a, __b)
+#define vmulq_n_u8(__a, __b) __arm_vmulq_n_u8(__a, __b)
+#define vmulltq_int_u8(__a, __b) __arm_vmulltq_int_u8(__a, __b)
+#define vmullbq_int_u8(__a, __b) __arm_vmullbq_int_u8(__a, __b)
+#define vmulhq_u8(__a, __b) __arm_vmulhq_u8(__a, __b)
+#define vmladavq_u8(__a, __b) __arm_vmladavq_u8(__a, __b)
+#define vminvq_u8(__a, __b) __arm_vminvq_u8(__a, __b)
+#define vminq_u8(__a, __b) __arm_vminq_u8(__a, __b)
+#define vmaxvq_u8(__a, __b) __arm_vmaxvq_u8(__a, __b)
+#define vmaxq_u8(__a, __b) __arm_vmaxq_u8(__a, __b)
+#define vhsubq_u8(__a, __b) __arm_vhsubq_u8(__a, __b)
+#define vhsubq_n_u8(__a, __b) __arm_vhsubq_n_u8(__a, __b)
+#define vhaddq_u8(__a, __b) __arm_vhaddq_u8(__a, __b)
+#define vhaddq_n_u8(__a, __b) __arm_vhaddq_n_u8(__a, __b)
+#define veorq_u8(__a, __b) __arm_veorq_u8(__a, __b)
+#define vcmpneq_n_u8(__a, __b) __arm_vcmpneq_n_u8(__a, __b)
+#define vcmphiq_u8(__a, __b) __arm_vcmphiq_u8(__a, __b)
+#define vcmphiq_n_u8(__a, __b) __arm_vcmphiq_n_u8(__a, __b)
+#define vcmpeqq_u8(__a, __b) __arm_vcmpeqq_u8(__a, __b)
+#define vcmpeqq_n_u8(__a, __b) __arm_vcmpeqq_n_u8(__a, __b)
+#define vcmpcsq_u8(__a, __b) __arm_vcmpcsq_u8(__a, __b)
+#define vcmpcsq_n_u8(__a, __b) __arm_vcmpcsq_n_u8(__a, __b)
+#define vcaddq_rot90_u8(__a, __b) __arm_vcaddq_rot90_u8(__a, __b)
+#define vcaddq_rot270_u8(__a, __b) __arm_vcaddq_rot270_u8(__a, __b)
+#define vbicq_u8(__a, __b) __arm_vbicq_u8(__a, __b)
+#define vandq_u8(__a, __b) __arm_vandq_u8(__a, __b)
+#define vaddvq_p_u8(__a, __p) __arm_vaddvq_p_u8(__a, __p)
+#define vaddvaq_u8(__a, __b) __arm_vaddvaq_u8(__a, __b)
+#define vaddq_n_u8(__a, __b) __arm_vaddq_n_u8(__a, __b)
+#define vabdq_u8(__a, __b) __arm_vabdq_u8(__a, __b)
+#define vshlq_r_u8(__a, __b) __arm_vshlq_r_u8(__a, __b)
+#define vrshlq_u8(__a, __b) __arm_vrshlq_u8(__a, __b)
+#define vrshlq_n_u8(__a, __b) __arm_vrshlq_n_u8(__a, __b)
+#define vqshlq_u8(__a, __b) __arm_vqshlq_u8(__a, __b)
+#define vqshlq_r_u8(__a, __b) __arm_vqshlq_r_u8(__a, __b)
+#define vqrshlq_u8(__a, __b) __arm_vqrshlq_u8(__a, __b)
+#define vqrshlq_n_u8(__a, __b) __arm_vqrshlq_n_u8(__a, __b)
+#define vminavq_s8(__a, __b) __arm_vminavq_s8(__a, __b)
+#define vminaq_s8(__a, __b) __arm_vminaq_s8(__a, __b)
+#define vmaxavq_s8(__a, __b) __arm_vmaxavq_s8(__a, __b)
+#define vmaxaq_s8(__a, __b) __arm_vmaxaq_s8(__a, __b)
+#define vbrsrq_n_u8(__a, __b) __arm_vbrsrq_n_u8(__a, __b)
+#define vshlq_n_u8(__a, __imm) __arm_vshlq_n_u8(__a, __imm)
+#define vrshrq_n_u8(__a, __imm) __arm_vrshrq_n_u8(__a, __imm)
+#define vqshlq_n_u8(__a, __imm) __arm_vqshlq_n_u8(__a, __imm)
+#define vcmpneq_n_s8(__a, __b) __arm_vcmpneq_n_s8(__a, __b)
+#define vcmpltq_s8(__a, __b) __arm_vcmpltq_s8(__a, __b)
+#define vcmpltq_n_s8(__a, __b) __arm_vcmpltq_n_s8(__a, __b)
+#define vcmpleq_s8(__a, __b) __arm_vcmpleq_s8(__a, __b)
+#define vcmpleq_n_s8(__a, __b) __arm_vcmpleq_n_s8(__a, __b)
+#define vcmpgtq_s8(__a, __b) __arm_vcmpgtq_s8(__a, __b)
+#define vcmpgtq_n_s8(__a, __b) __arm_vcmpgtq_n_s8(__a, __b)
+#define vcmpgeq_s8(__a, __b) __arm_vcmpgeq_s8(__a, __b)
+#define vcmpgeq_n_s8(__a, __b) __arm_vcmpgeq_n_s8(__a, __b)
+#define vcmpeqq_s8(__a, __b) __arm_vcmpeqq_s8(__a, __b)
+#define vcmpeqq_n_s8(__a, __b) __arm_vcmpeqq_n_s8(__a, __b)
+#define vqshluq_n_s8(__a, __imm) __arm_vqshluq_n_s8(__a, __imm)
+#define vaddvq_p_s8(__a, __p) __arm_vaddvq_p_s8(__a, __p)
+#define vsubq_s8(__a, __b) __arm_vsubq_s8(__a, __b)
+#define vsubq_n_s8(__a, __b) __arm_vsubq_n_s8(__a, __b)
+#define vshlq_r_s8(__a, __b) __arm_vshlq_r_s8(__a, __b)
+#define vrshlq_s8(__a, __b) __arm_vrshlq_s8(__a, __b)
+#define vrshlq_n_s8(__a, __b) __arm_vrshlq_n_s8(__a, __b)
+#define vrmulhq_s8(__a, __b) __arm_vrmulhq_s8(__a, __b)
+#define vrhaddq_s8(__a, __b) __arm_vrhaddq_s8(__a, __b)
+#define vqsubq_s8(__a, __b) __arm_vqsubq_s8(__a, __b)
+#define vqsubq_n_s8(__a, __b) __arm_vqsubq_n_s8(__a, __b)
+#define vqshlq_s8(__a, __b) __arm_vqshlq_s8(__a, __b)
+#define vqshlq_r_s8(__a, __b) __arm_vqshlq_r_s8(__a, __b)
+#define vqrshlq_s8(__a, __b) __arm_vqrshlq_s8(__a, __b)
+#define vqrshlq_n_s8(__a, __b) __arm_vqrshlq_n_s8(__a, __b)
+#define vqrdmulhq_s8(__a, __b) __arm_vqrdmulhq_s8(__a, __b)
+#define vqrdmulhq_n_s8(__a, __b) __arm_vqrdmulhq_n_s8(__a, __b)
+#define vqdmulhq_s8(__a, __b) __arm_vqdmulhq_s8(__a, __b)
+#define vqdmulhq_n_s8(__a, __b) __arm_vqdmulhq_n_s8(__a, __b)
+#define vqaddq_s8(__a, __b) __arm_vqaddq_s8(__a, __b)
+#define vqaddq_n_s8(__a, __b) __arm_vqaddq_n_s8(__a, __b)
+#define vorrq_s8(__a, __b) __arm_vorrq_s8(__a, __b)
+#define vornq_s8(__a, __b) __arm_vornq_s8(__a, __b)
+#define vmulq_s8(__a, __b) __arm_vmulq_s8(__a, __b)
+#define vmulq_n_s8(__a, __b) __arm_vmulq_n_s8(__a, __b)
+#define vmulltq_int_s8(__a, __b) __arm_vmulltq_int_s8(__a, __b)
+#define vmullbq_int_s8(__a, __b) __arm_vmullbq_int_s8(__a, __b)
+#define vmulhq_s8(__a, __b) __arm_vmulhq_s8(__a, __b)
+#define vmlsdavxq_s8(__a, __b) __arm_vmlsdavxq_s8(__a, __b)
+#define vmlsdavq_s8(__a, __b) __arm_vmlsdavq_s8(__a, __b)
+#define vmladavxq_s8(__a, __b) __arm_vmladavxq_s8(__a, __b)
+#define vmladavq_s8(__a, __b) __arm_vmladavq_s8(__a, __b)
+#define vminvq_s8(__a, __b) __arm_vminvq_s8(__a, __b)
+#define vminq_s8(__a, __b) __arm_vminq_s8(__a, __b)
+#define vmaxvq_s8(__a, __b) __arm_vmaxvq_s8(__a, __b)
+#define vmaxq_s8(__a, __b) __arm_vmaxq_s8(__a, __b)
+#define vhsubq_s8(__a, __b) __arm_vhsubq_s8(__a, __b)
+#define vhsubq_n_s8(__a, __b) __arm_vhsubq_n_s8(__a, __b)
+#define vhcaddq_rot90_s8(__a, __b) __arm_vhcaddq_rot90_s8(__a, __b)
+#define vhcaddq_rot270_s8(__a, __b) __arm_vhcaddq_rot270_s8(__a, __b)
+#define vhaddq_s8(__a, __b) __arm_vhaddq_s8(__a, __b)
+#define vhaddq_n_s8(__a, __b) __arm_vhaddq_n_s8(__a, __b)
+#define veorq_s8(__a, __b) __arm_veorq_s8(__a, __b)
+#define vcaddq_rot90_s8(__a, __b) __arm_vcaddq_rot90_s8(__a, __b)
+#define vcaddq_rot270_s8(__a, __b) __arm_vcaddq_rot270_s8(__a, __b)
+#define vbrsrq_n_s8(__a, __b) __arm_vbrsrq_n_s8(__a, __b)
+#define vbicq_s8(__a, __b) __arm_vbicq_s8(__a, __b)
+#define vandq_s8(__a, __b) __arm_vandq_s8(__a, __b)
+#define vaddvaq_s8(__a, __b) __arm_vaddvaq_s8(__a, __b)
+#define vaddq_n_s8(__a, __b) __arm_vaddq_n_s8(__a, __b)
+#define vabdq_s8(__a, __b) __arm_vabdq_s8(__a, __b)
+#define vshlq_n_s8(__a, __imm) __arm_vshlq_n_s8(__a, __imm)
+#define vrshrq_n_s8(__a, __imm) __arm_vrshrq_n_s8(__a, __imm)
+#define vqshlq_n_s8(__a, __imm) __arm_vqshlq_n_s8(__a, __imm)
+#define vsubq_u16(__a, __b) __arm_vsubq_u16(__a, __b)
+#define vsubq_n_u16(__a, __b) __arm_vsubq_n_u16(__a, __b)
+#define vrmulhq_u16(__a, __b) __arm_vrmulhq_u16(__a, __b)
+#define vrhaddq_u16(__a, __b) __arm_vrhaddq_u16(__a, __b)
+#define vqsubq_u16(__a, __b) __arm_vqsubq_u16(__a, __b)
+#define vqsubq_n_u16(__a, __b) __arm_vqsubq_n_u16(__a, __b)
+#define vqaddq_u16(__a, __b) __arm_vqaddq_u16(__a, __b)
+#define vqaddq_n_u16(__a, __b) __arm_vqaddq_n_u16(__a, __b)
+#define vorrq_u16(__a, __b) __arm_vorrq_u16(__a, __b)
+#define vornq_u16(__a, __b) __arm_vornq_u16(__a, __b)
+#define vmulq_u16(__a, __b) __arm_vmulq_u16(__a, __b)
+#define vmulq_n_u16(__a, __b) __arm_vmulq_n_u16(__a, __b)
+#define vmulltq_int_u16(__a, __b) __arm_vmulltq_int_u16(__a, __b)
+#define vmullbq_int_u16(__a, __b) __arm_vmullbq_int_u16(__a, __b)
+#define vmulhq_u16(__a, __b) __arm_vmulhq_u16(__a, __b)
+#define vmladavq_u16(__a, __b) __arm_vmladavq_u16(__a, __b)
+#define vminvq_u16(__a, __b) __arm_vminvq_u16(__a, __b)
+#define vminq_u16(__a, __b) __arm_vminq_u16(__a, __b)
+#define vmaxvq_u16(__a, __b) __arm_vmaxvq_u16(__a, __b)
+#define vmaxq_u16(__a, __b) __arm_vmaxq_u16(__a, __b)
+#define vhsubq_u16(__a, __b) __arm_vhsubq_u16(__a, __b)
+#define vhsubq_n_u16(__a, __b) __arm_vhsubq_n_u16(__a, __b)
+#define vhaddq_u16(__a, __b) __arm_vhaddq_u16(__a, __b)
+#define vhaddq_n_u16(__a, __b) __arm_vhaddq_n_u16(__a, __b)
+#define veorq_u16(__a, __b) __arm_veorq_u16(__a, __b)
+#define vcmpneq_n_u16(__a, __b) __arm_vcmpneq_n_u16(__a, __b)
+#define vcmphiq_u16(__a, __b) __arm_vcmphiq_u16(__a, __b)
+#define vcmphiq_n_u16(__a, __b) __arm_vcmphiq_n_u16(__a, __b)
+#define vcmpeqq_u16(__a, __b) __arm_vcmpeqq_u16(__a, __b)
+#define vcmpeqq_n_u16(__a, __b) __arm_vcmpeqq_n_u16(__a, __b)
+#define vcmpcsq_u16(__a, __b) __arm_vcmpcsq_u16(__a, __b)
+#define vcmpcsq_n_u16(__a, __b) __arm_vcmpcsq_n_u16(__a, __b)
+#define vcaddq_rot90_u16(__a, __b) __arm_vcaddq_rot90_u16(__a, __b)
+#define vcaddq_rot270_u16(__a, __b) __arm_vcaddq_rot270_u16(__a, __b)
+#define vbicq_u16(__a, __b) __arm_vbicq_u16(__a, __b)
+#define vandq_u16(__a, __b) __arm_vandq_u16(__a, __b)
+#define vaddvq_p_u16(__a, __p) __arm_vaddvq_p_u16(__a, __p)
+#define vaddvaq_u16(__a, __b) __arm_vaddvaq_u16(__a, __b)
+#define vaddq_n_u16(__a, __b) __arm_vaddq_n_u16(__a, __b)
+#define vabdq_u16(__a, __b) __arm_vabdq_u16(__a, __b)
+#define vshlq_r_u16(__a, __b) __arm_vshlq_r_u16(__a, __b)
+#define vrshlq_u16(__a, __b) __arm_vrshlq_u16(__a, __b)
+#define vrshlq_n_u16(__a, __b) __arm_vrshlq_n_u16(__a, __b)
+#define vqshlq_u16(__a, __b) __arm_vqshlq_u16(__a, __b)
+#define vqshlq_r_u16(__a, __b) __arm_vqshlq_r_u16(__a, __b)
+#define vqrshlq_u16(__a, __b) __arm_vqrshlq_u16(__a, __b)
+#define vqrshlq_n_u16(__a, __b) __arm_vqrshlq_n_u16(__a, __b)
+#define vminavq_s16(__a, __b) __arm_vminavq_s16(__a, __b)
+#define vminaq_s16(__a, __b) __arm_vminaq_s16(__a, __b)
+#define vmaxavq_s16(__a, __b) __arm_vmaxavq_s16(__a, __b)
+#define vmaxaq_s16(__a, __b) __arm_vmaxaq_s16(__a, __b)
+#define vbrsrq_n_u16(__a, __b) __arm_vbrsrq_n_u16(__a, __b)
+#define vshlq_n_u16(__a, __imm) __arm_vshlq_n_u16(__a, __imm)
+#define vrshrq_n_u16(__a, __imm) __arm_vrshrq_n_u16(__a, __imm)
+#define vqshlq_n_u16(__a, __imm) __arm_vqshlq_n_u16(__a, __imm)
+#define vcmpneq_n_s16(__a, __b) __arm_vcmpneq_n_s16(__a, __b)
+#define vcmpltq_s16(__a, __b) __arm_vcmpltq_s16(__a, __b)
+#define vcmpltq_n_s16(__a, __b) __arm_vcmpltq_n_s16(__a, __b)
+#define vcmpleq_s16(__a, __b) __arm_vcmpleq_s16(__a, __b)
+#define vcmpleq_n_s16(__a, __b) __arm_vcmpleq_n_s16(__a, __b)
+#define vcmpgtq_s16(__a, __b) __arm_vcmpgtq_s16(__a, __b)
+#define vcmpgtq_n_s16(__a, __b) __arm_vcmpgtq_n_s16(__a, __b)
+#define vcmpgeq_s16(__a, __b) __arm_vcmpgeq_s16(__a, __b)
+#define vcmpgeq_n_s16(__a, __b) __arm_vcmpgeq_n_s16(__a, __b)
+#define vcmpeqq_s16(__a, __b) __arm_vcmpeqq_s16(__a, __b)
+#define vcmpeqq_n_s16(__a, __b) __arm_vcmpeqq_n_s16(__a, __b)
+#define vqshluq_n_s16(__a, __imm) __arm_vqshluq_n_s16(__a, __imm)
+#define vaddvq_p_s16(__a, __p) __arm_vaddvq_p_s16(__a, __p)
+#define vsubq_s16(__a, __b) __arm_vsubq_s16(__a, __b)
+#define vsubq_n_s16(__a, __b) __arm_vsubq_n_s16(__a, __b)
+#define vshlq_r_s16(__a, __b) __arm_vshlq_r_s16(__a, __b)
+#define vrshlq_s16(__a, __b) __arm_vrshlq_s16(__a, __b)
+#define vrshlq_n_s16(__a, __b) __arm_vrshlq_n_s16(__a, __b)
+#define vrmulhq_s16(__a, __b) __arm_vrmulhq_s16(__a, __b)
+#define vrhaddq_s16(__a, __b) __arm_vrhaddq_s16(__a, __b)
+#define vqsubq_s16(__a, __b) __arm_vqsubq_s16(__a, __b)
+#define vqsubq_n_s16(__a, __b) __arm_vqsubq_n_s16(__a, __b)
+#define vqshlq_s16(__a, __b) __arm_vqshlq_s16(__a, __b)
+#define vqshlq_r_s16(__a, __b) __arm_vqshlq_r_s16(__a, __b)
+#define vqrshlq_s16(__a, __b) __arm_vqrshlq_s16(__a, __b)
+#define vqrshlq_n_s16(__a, __b) __arm_vqrshlq_n_s16(__a, __b)
+#define vqrdmulhq_s16(__a, __b) __arm_vqrdmulhq_s16(__a, __b)
+#define vqrdmulhq_n_s16(__a, __b) __arm_vqrdmulhq_n_s16(__a, __b)
+#define vqdmulhq_s16(__a, __b) __arm_vqdmulhq_s16(__a, __b)
+#define vqdmulhq_n_s16(__a, __b) __arm_vqdmulhq_n_s16(__a, __b)
+#define vqaddq_s16(__a, __b) __arm_vqaddq_s16(__a, __b)
+#define vqaddq_n_s16(__a, __b) __arm_vqaddq_n_s16(__a, __b)
+#define vorrq_s16(__a, __b) __arm_vorrq_s16(__a, __b)
+#define vornq_s16(__a, __b) __arm_vornq_s16(__a, __b)
+#define vmulq_s16(__a, __b) __arm_vmulq_s16(__a, __b)
+#define vmulq_n_s16(__a, __b) __arm_vmulq_n_s16(__a, __b)
+#define vmulltq_int_s16(__a, __b) __arm_vmulltq_int_s16(__a, __b)
+#define vmullbq_int_s16(__a, __b) __arm_vmullbq_int_s16(__a, __b)
+#define vmulhq_s16(__a, __b) __arm_vmulhq_s16(__a, __b)
+#define vmlsdavxq_s16(__a, __b) __arm_vmlsdavxq_s16(__a, __b)
+#define vmlsdavq_s16(__a, __b) __arm_vmlsdavq_s16(__a, __b)
+#define vmladavxq_s16(__a, __b) __arm_vmladavxq_s16(__a, __b)
+#define vmladavq_s16(__a, __b) __arm_vmladavq_s16(__a, __b)
+#define vminvq_s16(__a, __b) __arm_vminvq_s16(__a, __b)
+#define vminq_s16(__a, __b) __arm_vminq_s16(__a, __b)
+#define vmaxvq_s16(__a, __b) __arm_vmaxvq_s16(__a, __b)
+#define vmaxq_s16(__a, __b) __arm_vmaxq_s16(__a, __b)
+#define vhsubq_s16(__a, __b) __arm_vhsubq_s16(__a, __b)
+#define vhsubq_n_s16(__a, __b) __arm_vhsubq_n_s16(__a, __b)
+#define vhcaddq_rot90_s16(__a, __b) __arm_vhcaddq_rot90_s16(__a, __b)
+#define vhcaddq_rot270_s16(__a, __b) __arm_vhcaddq_rot270_s16(__a, __b)
+#define vhaddq_s16(__a, __b) __arm_vhaddq_s16(__a, __b)
+#define vhaddq_n_s16(__a, __b) __arm_vhaddq_n_s16(__a, __b)
+#define veorq_s16(__a, __b) __arm_veorq_s16(__a, __b)
+#define vcaddq_rot90_s16(__a, __b) __arm_vcaddq_rot90_s16(__a, __b)
+#define vcaddq_rot270_s16(__a, __b) __arm_vcaddq_rot270_s16(__a, __b)
+#define vbrsrq_n_s16(__a, __b) __arm_vbrsrq_n_s16(__a, __b)
+#define vbicq_s16(__a, __b) __arm_vbicq_s16(__a, __b)
+#define vandq_s16(__a, __b) __arm_vandq_s16(__a, __b)
+#define vaddvaq_s16(__a, __b) __arm_vaddvaq_s16(__a, __b)
+#define vaddq_n_s16(__a, __b) __arm_vaddq_n_s16(__a, __b)
+#define vabdq_s16(__a, __b) __arm_vabdq_s16(__a, __b)
+#define vshlq_n_s16(__a, __imm) __arm_vshlq_n_s16(__a, __imm)
+#define vrshrq_n_s16(__a, __imm) __arm_vrshrq_n_s16(__a, __imm)
+#define vqshlq_n_s16(__a, __imm) __arm_vqshlq_n_s16(__a, __imm)
+#define vsubq_u32(__a, __b) __arm_vsubq_u32(__a, __b)
+#define vsubq_n_u32(__a, __b) __arm_vsubq_n_u32(__a, __b)
+#define vrmulhq_u32(__a, __b) __arm_vrmulhq_u32(__a, __b)
+#define vrhaddq_u32(__a, __b) __arm_vrhaddq_u32(__a, __b)
+#define vqsubq_u32(__a, __b) __arm_vqsubq_u32(__a, __b)
+#define vqsubq_n_u32(__a, __b) __arm_vqsubq_n_u32(__a, __b)
+#define vqaddq_u32(__a, __b) __arm_vqaddq_u32(__a, __b)
+#define vqaddq_n_u32(__a, __b) __arm_vqaddq_n_u32(__a, __b)
+#define vorrq_u32(__a, __b) __arm_vorrq_u32(__a, __b)
+#define vornq_u32(__a, __b) __arm_vornq_u32(__a, __b)
+#define vmulq_u32(__a, __b) __arm_vmulq_u32(__a, __b)
+#define vmulq_n_u32(__a, __b) __arm_vmulq_n_u32(__a, __b)
+#define vmulltq_int_u32(__a, __b) __arm_vmulltq_int_u32(__a, __b)
+#define vmullbq_int_u32(__a, __b) __arm_vmullbq_int_u32(__a, __b)
+#define vmulhq_u32(__a, __b) __arm_vmulhq_u32(__a, __b)
+#define vmladavq_u32(__a, __b) __arm_vmladavq_u32(__a, __b)
+#define vminvq_u32(__a, __b) __arm_vminvq_u32(__a, __b)
+#define vminq_u32(__a, __b) __arm_vminq_u32(__a, __b)
+#define vmaxvq_u32(__a, __b) __arm_vmaxvq_u32(__a, __b)
+#define vmaxq_u32(__a, __b) __arm_vmaxq_u32(__a, __b)
+#define vhsubq_u32(__a, __b) __arm_vhsubq_u32(__a, __b)
+#define vhsubq_n_u32(__a, __b) __arm_vhsubq_n_u32(__a, __b)
+#define vhaddq_u32(__a, __b) __arm_vhaddq_u32(__a, __b)
+#define vhaddq_n_u32(__a, __b) __arm_vhaddq_n_u32(__a, __b)
+#define veorq_u32(__a, __b) __arm_veorq_u32(__a, __b)
+#define vcmpneq_n_u32(__a, __b) __arm_vcmpneq_n_u32(__a, __b)
+#define vcmphiq_u32(__a, __b) __arm_vcmphiq_u32(__a, __b)
+#define vcmphiq_n_u32(__a, __b) __arm_vcmphiq_n_u32(__a, __b)
+#define vcmpeqq_u32(__a, __b) __arm_vcmpeqq_u32(__a, __b)
+#define vcmpeqq_n_u32(__a, __b) __arm_vcmpeqq_n_u32(__a, __b)
+#define vcmpcsq_u32(__a, __b) __arm_vcmpcsq_u32(__a, __b)
+#define vcmpcsq_n_u32(__a, __b) __arm_vcmpcsq_n_u32(__a, __b)
+#define vcaddq_rot90_u32(__a, __b) __arm_vcaddq_rot90_u32(__a, __b)
+#define vcaddq_rot270_u32(__a, __b) __arm_vcaddq_rot270_u32(__a, __b)
+#define vbicq_u32(__a, __b) __arm_vbicq_u32(__a, __b)
+#define vandq_u32(__a, __b) __arm_vandq_u32(__a, __b)
+#define vaddvq_p_u32(__a, __p) __arm_vaddvq_p_u32(__a, __p)
+#define vaddvaq_u32(__a, __b) __arm_vaddvaq_u32(__a, __b)
+#define vaddq_n_u32(__a, __b) __arm_vaddq_n_u32(__a, __b)
+#define vabdq_u32(__a, __b) __arm_vabdq_u32(__a, __b)
+#define vshlq_r_u32(__a, __b) __arm_vshlq_r_u32(__a, __b)
+#define vrshlq_u32(__a, __b) __arm_vrshlq_u32(__a, __b)
+#define vrshlq_n_u32(__a, __b) __arm_vrshlq_n_u32(__a, __b)
+#define vqshlq_u32(__a, __b) __arm_vqshlq_u32(__a, __b)
+#define vqshlq_r_u32(__a, __b) __arm_vqshlq_r_u32(__a, __b)
+#define vqrshlq_u32(__a, __b) __arm_vqrshlq_u32(__a, __b)
+#define vqrshlq_n_u32(__a, __b) __arm_vqrshlq_n_u32(__a, __b)
+#define vminavq_s32(__a, __b) __arm_vminavq_s32(__a, __b)
+#define vminaq_s32(__a, __b) __arm_vminaq_s32(__a, __b)
+#define vmaxavq_s32(__a, __b) __arm_vmaxavq_s32(__a, __b)
+#define vmaxaq_s32(__a, __b) __arm_vmaxaq_s32(__a, __b)
+#define vbrsrq_n_u32(__a, __b) __arm_vbrsrq_n_u32(__a, __b)
+#define vshlq_n_u32(__a, __imm) __arm_vshlq_n_u32(__a, __imm)
+#define vrshrq_n_u32(__a, __imm) __arm_vrshrq_n_u32(__a, __imm)
+#define vqshlq_n_u32(__a, __imm) __arm_vqshlq_n_u32(__a, __imm)
+#define vcmpneq_n_s32(__a, __b) __arm_vcmpneq_n_s32(__a, __b)
+#define vcmpltq_s32(__a, __b) __arm_vcmpltq_s32(__a, __b)
+#define vcmpltq_n_s32(__a, __b) __arm_vcmpltq_n_s32(__a, __b)
+#define vcmpleq_s32(__a, __b) __arm_vcmpleq_s32(__a, __b)
+#define vcmpleq_n_s32(__a, __b) __arm_vcmpleq_n_s32(__a, __b)
+#define vcmpgtq_s32(__a, __b) __arm_vcmpgtq_s32(__a, __b)
+#define vcmpgtq_n_s32(__a, __b) __arm_vcmpgtq_n_s32(__a, __b)
+#define vcmpgeq_s32(__a, __b) __arm_vcmpgeq_s32(__a, __b)
+#define vcmpgeq_n_s32(__a, __b) __arm_vcmpgeq_n_s32(__a, __b)
+#define vcmpeqq_s32(__a, __b) __arm_vcmpeqq_s32(__a, __b)
+#define vcmpeqq_n_s32(__a, __b) __arm_vcmpeqq_n_s32(__a, __b)
+#define vqshluq_n_s32(__a, __imm) __arm_vqshluq_n_s32(__a, __imm)
+#define vaddvq_p_s32(__a, __p) __arm_vaddvq_p_s32(__a, __p)
+#define vsubq_s32(__a, __b) __arm_vsubq_s32(__a, __b)
+#define vsubq_n_s32(__a, __b) __arm_vsubq_n_s32(__a, __b)
+#define vshlq_r_s32(__a, __b) __arm_vshlq_r_s32(__a, __b)
+#define vrshlq_s32(__a, __b) __arm_vrshlq_s32(__a, __b)
+#define vrshlq_n_s32(__a, __b) __arm_vrshlq_n_s32(__a, __b)
+#define vrmulhq_s32(__a, __b) __arm_vrmulhq_s32(__a, __b)
+#define vrhaddq_s32(__a, __b) __arm_vrhaddq_s32(__a, __b)
+#define vqsubq_s32(__a, __b) __arm_vqsubq_s32(__a, __b)
+#define vqsubq_n_s32(__a, __b) __arm_vqsubq_n_s32(__a, __b)
+#define vqshlq_s32(__a, __b) __arm_vqshlq_s32(__a, __b)
+#define vqshlq_r_s32(__a, __b) __arm_vqshlq_r_s32(__a, __b)
+#define vqrshlq_s32(__a, __b) __arm_vqrshlq_s32(__a, __b)
+#define vqrshlq_n_s32(__a, __b) __arm_vqrshlq_n_s32(__a, __b)
+#define vqrdmulhq_s32(__a, __b) __arm_vqrdmulhq_s32(__a, __b)
+#define vqrdmulhq_n_s32(__a, __b) __arm_vqrdmulhq_n_s32(__a, __b)
+#define vqdmulhq_s32(__a, __b) __arm_vqdmulhq_s32(__a, __b)
+#define vqdmulhq_n_s32(__a, __b) __arm_vqdmulhq_n_s32(__a, __b)
+#define vqaddq_s32(__a, __b) __arm_vqaddq_s32(__a, __b)
+#define vqaddq_n_s32(__a, __b) __arm_vqaddq_n_s32(__a, __b)
+#define vorrq_s32(__a, __b) __arm_vorrq_s32(__a, __b)
+#define vornq_s32(__a, __b) __arm_vornq_s32(__a, __b)
+#define vmulq_s32(__a, __b) __arm_vmulq_s32(__a, __b)
+#define vmulq_n_s32(__a, __b) __arm_vmulq_n_s32(__a, __b)
+#define vmulltq_int_s32(__a, __b) __arm_vmulltq_int_s32(__a, __b)
+#define vmullbq_int_s32(__a, __b) __arm_vmullbq_int_s32(__a, __b)
+#define vmulhq_s32(__a, __b) __arm_vmulhq_s32(__a, __b)
+#define vmlsdavxq_s32(__a, __b) __arm_vmlsdavxq_s32(__a, __b)
+#define vmlsdavq_s32(__a, __b) __arm_vmlsdavq_s32(__a, __b)
+#define vmladavxq_s32(__a, __b) __arm_vmladavxq_s32(__a, __b)
+#define vmladavq_s32(__a, __b) __arm_vmladavq_s32(__a, __b)
+#define vminvq_s32(__a, __b) __arm_vminvq_s32(__a, __b)
+#define vminq_s32(__a, __b) __arm_vminq_s32(__a, __b)
+#define vmaxvq_s32(__a, __b) __arm_vmaxvq_s32(__a, __b)
+#define vmaxq_s32(__a, __b) __arm_vmaxq_s32(__a, __b)
+#define vhsubq_s32(__a, __b) __arm_vhsubq_s32(__a, __b)
+#define vhsubq_n_s32(__a, __b) __arm_vhsubq_n_s32(__a, __b)
+#define vhcaddq_rot90_s32(__a, __b) __arm_vhcaddq_rot90_s32(__a, __b)
+#define vhcaddq_rot270_s32(__a, __b) __arm_vhcaddq_rot270_s32(__a, __b)
+#define vhaddq_s32(__a, __b) __arm_vhaddq_s32(__a, __b)
+#define vhaddq_n_s32(__a, __b) __arm_vhaddq_n_s32(__a, __b)
+#define veorq_s32(__a, __b) __arm_veorq_s32(__a, __b)
+#define vcaddq_rot90_s32(__a, __b) __arm_vcaddq_rot90_s32(__a, __b)
+#define vcaddq_rot270_s32(__a, __b) __arm_vcaddq_rot270_s32(__a, __b)
+#define vbrsrq_n_s32(__a, __b) __arm_vbrsrq_n_s32(__a, __b)
+#define vbicq_s32(__a, __b) __arm_vbicq_s32(__a, __b)
+#define vandq_s32(__a, __b) __arm_vandq_s32(__a, __b)
+#define vaddvaq_s32(__a, __b) __arm_vaddvaq_s32(__a, __b)
+#define vaddq_n_s32(__a, __b) __arm_vaddq_n_s32(__a, __b)
+#define vabdq_s32(__a, __b) __arm_vabdq_s32(__a, __b)
+#define vshlq_n_s32(__a, __imm) __arm_vshlq_n_s32(__a, __imm)
+#define vrshrq_n_s32(__a, __imm) __arm_vrshrq_n_s32(__a, __imm)
+#define vqshlq_n_s32(__a, __imm) __arm_vqshlq_n_s32(__a, __imm)
#endif
__extension__ extern __inline void
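(Aside, not part of the patch: the macro block above only forwards the user-facing intrinsic names to the type-suffixed __arm_* wrappers that the hunks below define on top of the __builtin_mve_* builtins. A minimal usage sketch follows, based solely on the signatures listed above — vaddq_n_u8, vcmpeqq_u8 and vaddvq_p_u8. The function name add_and_reduce is made up for illustration, and it assumes a toolchain that ships <arm_mve.h> with MVE enabled, e.g. -march=armv8.1-m.main+mve -mfloat-abi=hard.)

    #include <arm_mve.h>

    /* Hypothetical helper: add a scalar offset to every lane, compare the
       result against a second vector, and sum only the matching lanes.  */
    uint32_t
    add_and_reduce (uint8x16_t a, uint8x16_t b, uint8_t offset)
    {
      uint8x16_t sum = vaddq_n_u8 (a, offset);   /* lane-wise a + offset      */
      mve_pred16_t p = vcmpeqq_u8 (sum, b);      /* per-lane predicate sum==b */
      return vaddvq_p_u8 (sum, p);               /* predicated reduction      */
    }

(Each call expands through the corresponding macro to its __arm_*_u8 wrapper and from there to a single MVE builtin, so the whole body maps to three vector instructions plus the reduction.)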
@@ -979,6 +1339,2525 @@ __arm_vshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
return __builtin_mve_vshlq_uv4si (__a, __b);
}
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vsubq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vsubq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vrmulhq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vrhaddq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vqsubq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vqsubq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vqaddq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vqaddq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vorrq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vornq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vmulq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vmulq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vmulltq_int_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vmullbq_int_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vmulhq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vmladavq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_u8 (uint8_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vminvq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vminq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_u8 (uint8_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vmaxvq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vmaxq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vhsubq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vhsubq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vhaddq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vhaddq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_veorq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vcmpneq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vcmphiq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vcmphiq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vcmpeqq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vcmpeqq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vcmpcsq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vcmpcsq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vcaddq_rot90_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vcaddq_rot270_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vbicq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vandq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p_u8 (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvq_p_uv16qi (__a, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_u8 (uint32_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vaddvaq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_n_u8 (uint8x16_t __a, uint8_t __b)
+{
+ return __builtin_mve_vaddq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __builtin_mve_vabdq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r_u8 (uint8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vshlq_r_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vrshlq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_n_u8 (uint8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vrshlq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqshlq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r_u8 (uint8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vqshlq_r_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqrshlq_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_n_u8 (uint8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vqrshlq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminavq_s8 (uint8_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vminavq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminaq_s8 (uint8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vminaq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxavq_s8 (uint8_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmaxavq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxaq_s8 (uint8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmaxaq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_n_u8 (uint8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vbrsrq_n_uv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n_u8 (uint8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vshlq_n_uv16qi (__a, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_n_u8 (uint8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vrshrq_n_uv16qi (__a, __imm);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n_u8 (uint8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vqshlq_n_uv16qi (__a, __imm);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vcmpneq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vcmpltq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vcmpltq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vcmpleq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vcmpleq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vcmpgtq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vcmpgtq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vcmpgeq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vcmpgeq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vcmpeqq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vcmpeqq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq_n_s8 (int8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vqshluq_n_sv16qi (__a, __imm);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvq_p_sv16qi (__a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vsubq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vsubq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r_s8 (int8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vshlq_r_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vrshlq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_n_s8 (int8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vrshlq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vrmulhq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vrhaddq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqsubq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vqsubq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqshlq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r_s8 (int8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vqshlq_r_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqrshlq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_n_s8 (int8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vqrshlq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqrdmulhq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vqrdmulhq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqdmulhq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vqdmulhq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vqaddq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vqaddq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vorrq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vornq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmulq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vmulq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmulltq_int_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmullbq_int_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmulhq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavxq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmlsdavxq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmlsdavq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavxq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmladavxq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmladavq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_s8 (int8_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vminvq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vminq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_s8 (int8_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmaxvq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vmaxq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vhsubq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vhsubq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vhcaddq_rot90_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vhcaddq_rot270_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vhaddq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vhaddq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_veorq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vcaddq_rot90_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vcaddq_rot270_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_n_s8 (int8x16_t __a, int32_t __b)
+{
+ return __builtin_mve_vbrsrq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vbicq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vandq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_s8 (int32_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vaddvaq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_n_s8 (int8x16_t __a, int8_t __b)
+{
+ return __builtin_mve_vaddq_n_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __builtin_mve_vabdq_sv16qi (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n_s8 (int8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vshlq_n_sv16qi (__a, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_n_s8 (int8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vrshrq_n_sv16qi (__a, __imm);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n_s8 (int8x16_t __a, const int __imm)
+{
+ return __builtin_mve_vqshlq_n_sv16qi (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vsubq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vsubq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vrmulhq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vrhaddq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vqsubq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vqsubq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vqaddq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vqaddq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vorrq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vornq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmulq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vmulq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmulltq_int_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmullbq_int_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmulhq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmladavq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_u16 (uint16_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vminvq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vminq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_u16 (uint16_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmaxvq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vmaxq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vhsubq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vhsubq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vhaddq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vhaddq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_veorq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vcmpneq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vcmphiq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vcmphiq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vcmpeqq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vcmpeqq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vcmpcsq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vcmpcsq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vcaddq_rot90_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vcaddq_rot270_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vbicq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vandq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p_u16 (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvq_p_uv8hi (__a, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_u16 (uint32_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vaddvaq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __builtin_mve_vaddq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __builtin_mve_vabdq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r_u16 (uint16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vshlq_r_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vrshlq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_n_u16 (uint16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vrshlq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqshlq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r_u16 (uint16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vqshlq_r_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqrshlq_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_n_u16 (uint16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vqrshlq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminavq_s16 (uint16_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vminavq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminaq_s16 (uint16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vminaq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxavq_s16 (uint16_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmaxavq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxaq_s16 (uint16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmaxaq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_n_u16 (uint16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vbrsrq_n_uv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n_u16 (uint16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vshlq_n_uv8hi (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_n_u16 (uint16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vrshrq_n_uv8hi (__a, __imm);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n_u16 (uint16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vqshlq_n_uv8hi (__a, __imm);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vcmpneq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vcmpltq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vcmpltq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vcmpleq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vcmpleq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vcmpgtq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vcmpgtq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vcmpgeq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vcmpgeq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vcmpeqq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vcmpeqq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq_n_s16 (int16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vqshluq_n_sv8hi (__a, __imm);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvq_p_sv8hi (__a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vsubq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vsubq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r_s16 (int16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vshlq_r_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vrshlq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_n_s16 (int16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vrshlq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vrmulhq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vrhaddq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqsubq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vqsubq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqshlq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r_s16 (int16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vqshlq_r_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqrshlq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_n_s16 (int16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vqrshlq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqrdmulhq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vqrdmulhq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqdmulhq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vqdmulhq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vqaddq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vqaddq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vorrq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vornq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmulq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vmulq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmulltq_int_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmullbq_int_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmulhq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavxq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmlsdavxq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmlsdavq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavxq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmladavxq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmladavq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_s16 (int16_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vminvq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vminq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_s16 (int16_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmaxvq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vmaxq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vhsubq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vhsubq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vhcaddq_rot90_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vhcaddq_rot270_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vhaddq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vhaddq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_veorq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vcaddq_rot90_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vcaddq_rot270_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_n_s16 (int16x8_t __a, int32_t __b)
+{
+ return __builtin_mve_vbrsrq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vbicq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vandq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_s16 (int32_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vaddvaq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __builtin_mve_vaddq_n_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __builtin_mve_vabdq_sv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n_s16 (int16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vshlq_n_sv8hi (__a, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_n_s16 (int16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vrshrq_n_sv8hi (__a, __imm);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n_s16 (int16x8_t __a, const int __imm)
+{
+ return __builtin_mve_vqshlq_n_sv8hi (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vsubq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vsubq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vrmulhq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vrhaddq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vqsubq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vqsubq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vqaddq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vqaddq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vorrq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vornq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmulq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vmulq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmulltq_int_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmullbq_int_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmulhq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmladavq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_u32 (uint32_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vminvq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vminq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_u32 (uint32_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmaxvq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vmaxq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vhsubq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vhsubq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vhaddq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vhaddq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_veorq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vcmpneq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vcmphiq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmphiq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vcmphiq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vcmpeqq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vcmpeqq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vcmpcsq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpcsq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vcmpcsq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vcaddq_rot90_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vcaddq_rot270_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vbicq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vandq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p_u32 (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvq_p_uv4si (__a, __p);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_u32 (uint32_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vaddvaq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __builtin_mve_vaddq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __builtin_mve_vabdq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r_u32 (uint32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vshlq_r_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vrshlq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_n_u32 (uint32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vrshlq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqshlq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r_u32 (uint32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqshlq_r_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqrshlq_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_n_u32 (uint32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqrshlq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminavq_s32 (uint32_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vminavq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminaq_s32 (uint32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vminaq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxavq_s32 (uint32_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmaxavq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxaq_s32 (uint32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmaxaq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_n_u32 (uint32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vbrsrq_n_uv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n_u32 (uint32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vshlq_n_uv4si (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_n_u32 (uint32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vrshrq_n_uv4si (__a, __imm);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n_u32 (uint32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vqshlq_n_uv4si (__a, __imm);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpneq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vcmpneq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vcmpltq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpltq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vcmpltq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vcmpleq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpleq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vcmpleq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vcmpgtq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgtq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vcmpgtq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vcmpgeq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpgeq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vcmpgeq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vcmpeqq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline mve_pred16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmpeqq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vcmpeqq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshluq_n_s32 (int32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vqshluq_n_sv4si (__a, __imm);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvq_p_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddvq_p_sv4si (__a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vsubq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vsubq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_r_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vshlq_r_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vrshlq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vrshlq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vrmulhq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vrhaddq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqsubq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqsubq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqsubq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqshlq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_r_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqshlq_r_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqrshlq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrshlq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqrshlq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqrdmulhq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqrdmulhq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqrdmulhq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqdmulhq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqdmulhq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqdmulhq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vqaddq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqaddq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vqaddq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vorrq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vornq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmulq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vmulq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmulltq_int_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmullbq_int_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmulhq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmlsdavxq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmlsdavq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmlsdavq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmladavxq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmladavq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmladavq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminvq_s32 (int32_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vminvq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vminq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxvq_s32 (int32_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmaxvq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vmaxq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vhsubq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vhsubq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vhcaddq_rot90_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vhcaddq_rot270_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vhaddq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vhaddq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_veorq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vcaddq_rot90_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vcaddq_rot270_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vbrsrq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vbicq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vandq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddvaq_s32 (int32_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vaddvaq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __builtin_mve_vaddq_n_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __builtin_mve_vabdq_sv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_n_s32 (int32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vshlq_n_sv4si (__a, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_n_s32 (int32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vrshrq_n_sv4si (__a, __imm);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vqshlq_n_s32 (int32x4_t __a, const int __imm)
+{
+ return __builtin_mve_vqshlq_n_sv4si (__a, __imm);
+}
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
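
As a quick illustration of how the explicitly-typed intrinsics defined above are meant to be called, here is a minimal sketch (illustrative only, not part of the patch). It assumes an MVE-enabled build, for example -march=armv8.1-m.main+mve, and calls the __arm_-prefixed forms directly for clarity; user code would normally reach the same functions through the unprefixed ACLE names or the polymorphic wrappers added further down in this patch.

#include <arm_mve.h>

int32x4_t
demo (int32x4_t a, int32x4_t b)
{
  /* Lane-wise saturating add (VQADD.S32).  */
  int32x4_t sum = __arm_vqaddq_s32 (a, b);
  /* Saturating rounding doubling multiply returning the high half,
     with a scalar second operand (VQRDMULH.S32).  */
  int32x4_t scaled = __arm_vqrdmulhq_n_s32 (sum, 3);
  /* Halving subtract (VHSUB.S32).  */
  return __arm_vhsubq_s32 (scaled, b);
}
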
@@ -1792,7 +4671,13 @@ extern void *__ARM_undef;
__typeof(p1) __p1 = (p1); \
_Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vsubq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vsubq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vsubq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
#define vbrsrq(p0,p1) __arm_vbrsrq(p0,p1)
#define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
@@ -1829,6 +4714,36 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_n_f16_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_n_f32_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+#define vcmpgeq(p0,p1) __arm_vcmpgeq(p0,p1)
+#define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgeq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgeq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgeq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgeq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
+
+#define vcmpgtq(p0,p1) __arm_vcmpgtq(p0,p1)
+#define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmpgtq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmpgtq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vcmpgtq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vcmpgtq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t)));})
+
#else /* MVE Integer.  */
#define vst4q(p0,p1) __arm_vst4q(p0,p1)
@@ -1991,6 +4906,746 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+#define vsubq(p0,p1) __arm_vsubq(p0,p1)
+#define __arm_vsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vsubq_n(p0,p1) __arm_vsubq_n(p0,p1)
+#define __arm_vsubq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
+
+#define vshlq_r(p0,p1) __arm_vshlq_r(p0,p1)
+#define __arm_vshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define vrshlq_n(p0,p1) __arm_vrshlq_n(p0,p1)
+#define __arm_vrshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
+
+#define vrshlq(p0,p1) __arm_vrshlq(p0,p1)
+#define __arm_vrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define vrmulhq(p0,p1) __arm_vrmulhq(p0,p1)
+#define __arm_vrmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vrhaddq(p0,p1) __arm_vrhaddq(p0,p1)
+#define __arm_vrhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vqsubq_n(p0,p1) __arm_vqsubq_n(p0,p1)
+#define __arm_vqsubq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
+
+#define vqsubq(p0,p1) __arm_vqsubq(p0,p1)
+#define __arm_vqsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vqshlq(p0,p1) __arm_vqshlq(p0,p1)
+#define __arm_vqshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define vqshlq_r(p0,p1) __arm_vqshlq_r(p0,p1)
+#define __arm_vqshlq_r(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_r_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_r_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_r_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_r_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_r_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_r_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define vqshluq(p0,p1) __arm_vqshluq(p0,p1)
+#define __arm_vqshluq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshluq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshluq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1));})
+
+#define vrshrq_n(p0,p1) __arm_vrshrq_n(p0,p1)
+#define __arm_vrshrq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define vshlq_n(p0,p1) __arm_vshlq_n(p0,p1)
+#define __arm_vshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define vqshluq_n(p0,p1) __arm_vqshluq_n(p0,p1)
+#define __arm_vqshluq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshluq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshluq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshluq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1));})
+
+#define vqshlq_n(p0,p1) __arm_vqshlq_n(p0,p1)
+#define __arm_vqshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vqshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vqshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vqshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vqshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vqshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vqshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define vqrshlq_n(p0,p1) __arm_vqrshlq_n(p0,p1)
+#define __arm_vqrshlq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vqrshlq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
+
+#define vqrshlq(p0,p1) __arm_vqrshlq(p0,p1)
+#define __arm_vqrshlq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrshlq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrshlq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrshlq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define vqrdmulhq_n(p0,p1) __arm_vqrdmulhq_n(p0,p1)
+#define __arm_vqrdmulhq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqrdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqrdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqrdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
+
+#define vqrdmulhq(p0,p1) __arm_vqrdmulhq(p0,p1)
+#define __arm_vqrdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqrdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqrdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqrdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define vqdmulhq_n(p0,p1) __arm_vqdmulhq_n(p0,p1)
+#define __arm_vqdmulhq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqdmulhq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqdmulhq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqdmulhq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
+
+#define vqdmulhq(p0,p1) __arm_vqdmulhq(p0,p1)
+#define __arm_vqdmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqdmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqdmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqdmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define vqaddq_n(p0,p1) __arm_vqaddq_n(p0,p1)
+#define __arm_vqaddq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vqaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vqaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vqaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vqaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vqaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vqaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
+
+#define vqaddq(p0,p1) __arm_vqaddq(p0,p1)
+#define __arm_vqaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vqaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vqaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vqaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vqaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vqaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vqaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vorrq_n(p0,p1) __arm_vorrq_n(p0,p1)
+#define __arm_vorrq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int32_t]: __arm_vorrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vorrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int32_t]: __arm_vorrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32_t]: __arm_vorrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int)));})
+
+#define vorrq(p0,p1) __arm_vorrq(p0,p1)
+#define __arm_vorrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vornq(p0,p1) __arm_vornq(p0,p1)
+#define __arm_vornq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vmulq_n(p0,p1) __arm_vmulq_n(p0,p1)
+#define __arm_vmulq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmulq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmulq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmulq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmulq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmulq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmulq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
+
+#define vmulq(p0,p1) __arm_vmulq(p0,p1)
+#define __arm_vmulq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vmulltq_int(p0,p1) __arm_vmulltq_int(p0,p1)
+#define __arm_vmulltq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vmullbq_int(p0,p1) __arm_vmullbq_int(p0,p1)
+#define __arm_vmullbq_int(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vmulhq(p0,p1) __arm_vmulhq(p0,p1)
+#define __arm_vmulhq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vminq(p0,p1) __arm_vminq(p0,p1)
+#define __arm_vminq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vminaq(p0,p1) __arm_vminaq(p0,p1)
+#define __arm_vminaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define vmaxq(p0,p1) __arm_vmaxq(p0,p1)
+#define __arm_vmaxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vmaxaq(p0,p1) __arm_vmaxaq(p0,p1)
+#define __arm_vmaxaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxaq_s8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxaq_s16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxaq_s32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define vhsubq_n(p0,p1) __arm_vhsubq_n(p0,p1)
+#define __arm_vhsubq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhsubq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhsubq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhsubq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhsubq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhsubq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhsubq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
+
+#define vhsubq(p0,p1) __arm_vhsubq(p0,p1)
+#define __arm_vhsubq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vhcaddq_rot90(p0,p1) __arm_vhcaddq_rot90(p0,p1)
+#define __arm_vhcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define vhcaddq_rot270(p0,p1) __arm_vhcaddq_rot270(p0,p1)
+#define __arm_vhcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define vhaddq_n(p0,p1) __arm_vhaddq_n(p0,p1)
+#define __arm_vhaddq_n(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
+
+#define vhaddq(p0,p1) __arm_vhaddq(p0,p1)
+#define __arm_vhaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define veorq(p0,p1) __arm_veorq(p0,p1)
+#define __arm_veorq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vcaddq_rot90(p0,p1) __arm_vcaddq_rot90(p0,p1)
+#define __arm_vcaddq_rot90(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vcaddq_rot270(p0,p1) __arm_vcaddq_rot270(p0,p1)
+#define __arm_vcaddq_rot270(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vbrsrq(p0,p1) __arm_vbrsrq(p0,p1)
+#define __arm_vbrsrq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define vbicq(p0,p1) __arm_vbicq(p0,p1)
+#define __arm_vbicq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vaddq(p0,p1) __arm_vaddq(p0,p1)
+#define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vaddq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vaddq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vaddq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
+
+#define vandq(p0,p1) __arm_vandq(p0,p1)
+#define __arm_vandq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vabdq(p0,p1) __arm_vabdq(p0,p1)
+#define __arm_vabdq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vaddvaq(p0,p1) __arm_vaddvaq(p0,p1)
+#define __arm_vaddvaq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int8x16_t]: __arm_vaddvaq_s8 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int16x8_t]: __arm_vaddvaq_s16 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vaddvaq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint8x16_t]: __arm_vaddvaq_u8 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint16x8_t]: __arm_vaddvaq_u16 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vaddvaq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vaddvq_p(p0,p1) __arm_vaddvq_p(p0,p1)
+#define __arm_vaddvq_p(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vaddvq_p_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vaddvq_p_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vaddvq_p_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vaddvq_p_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vaddvq_p_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vaddvq_p_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1));})
+
+#define vcmpcsq(p0,p1) __arm_vcmpcsq(p0,p1)
+#define __arm_vcmpcsq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpcsq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpcsq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpcsq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpcsq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpcsq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpcsq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
+
+#define vcmpeqq(p0,p1) __arm_vcmpeqq(p0,p1)
+#define __arm_vcmpeqq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpeqq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpeqq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpeqq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpeqq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpeqq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpeqq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpeqq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpeqq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpeqq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpeqq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpeqq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpeqq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
+
+#define vmlsdavxq(p0,p1) __arm_vmlsdavxq(p0,p1)
+#define __arm_vmlsdavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define vmlsdavq(p0,p1) __arm_vmlsdavq(p0,p1)
+#define __arm_vmlsdavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmlsdavq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmlsdavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmlsdavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define vmladavxq(p0,p1) __arm_vmladavxq(p0,p1)
+#define __arm_vmladavxq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavxq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavxq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavxq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define vmladavq(p0,p1) __arm_vmladavq(p0,p1)
+#define __arm_vmladavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmladavq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmladavq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmladavq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmladavq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmladavq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmladavq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vminvq(p0,p1) __arm_vminvq(p0,p1)
+#define __arm_vminvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t][__ARM_mve_type_int8x16_t]: __arm_vminvq_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16_t][__ARM_mve_type_int16x8_t]: __arm_vminvq_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vminvq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_uint8x16_t]: __arm_vminvq_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_uint16x8_t]: __arm_vminvq_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vminvq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vminavq(p0,p1) __arm_vminavq(p0,p1)
+#define __arm_vminavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_int8x16_t]: __arm_vminavq_s8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_int16x8_t]: __arm_vminavq_s16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_int32x4_t]: __arm_vminavq_s32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define vmaxvq(p0,p1) __arm_vmaxvq(p0,p1)
+#define __arm_vmaxvq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8_t][__ARM_mve_type_int8x16_t]: __arm_vmaxvq_s8 (__ARM_mve_coerce(__p0, int8_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16_t][__ARM_mve_type_int16x8_t]: __arm_vmaxvq_s16 (__ARM_mve_coerce(__p0, int16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32_t][__ARM_mve_type_int32x4_t]: __arm_vmaxvq_s32 (__ARM_mve_coerce(__p0, int32_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxvq_u8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxvq_u16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxvq_u32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, uint32x4_t)));})
+
+#define vmaxavq(p0,p1) __arm_vmaxavq(p0,p1)
+#define __arm_vmaxavq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8_t][__ARM_mve_type_int8x16_t]: __arm_vmaxavq_s8 (__ARM_mve_coerce(__p0, uint8_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_uint16_t][__ARM_mve_type_int16x8_t]: __arm_vmaxavq_s16 (__ARM_mve_coerce(__p0, uint16_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_uint32_t][__ARM_mve_type_int32x4_t]: __arm_vmaxavq_s32 (__ARM_mve_coerce(__p0, uint32_t), __ARM_mve_coerce(__p1, int32x4_t)));})
+
+#define vcmpneq(p0,p1) __arm_vcmpneq(p0,p1)
+#define __arm_vcmpneq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpneq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpneq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpneq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmpneq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmpneq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmpneq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpneq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpneq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpneq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmpneq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmpneq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmpneq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
+
+#define vcmpgeq(p0,p1) __arm_vcmpgeq(p0,p1)
+#define __arm_vcmpgeq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgeq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgeq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgeq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgeq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgeq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgeq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
+
+#define vcmpgtq(p0,p1) __arm_vcmpgtq(p0,p1)
+#define __arm_vcmpgtq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpgtq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpgtq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpgtq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpgtq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpgtq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpgtq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
+
+#define vcmphiq(p0,p1) __arm_vcmphiq(p0,p1)
+#define __arm_vcmphiq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcmphiq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcmphiq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcmphiq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vcmphiq_n_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vcmphiq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1, uint16_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vcmphiq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32_t)));})
+
+#define vcmpleq(p0,p1) __arm_vcmpleq(p0,p1)
+#define __arm_vcmpleq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpleq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpleq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpleq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpleq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpleq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpleq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
+
+#define vcmpltq(p0,p1) __arm_vcmpltq(p0,p1)
+#define __arm_vcmpltq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcmpltq_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcmpltq_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcmpltq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vcmpltq_n_s8 (__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vcmpltq_n_s16 (__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vcmpltq_n_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)));})
+
#endif /* MVE Floating point. */
#ifdef __cplusplus
diff --git a/gcc/config/arm/arm_mve_builtins.def b/gcc/config/arm/arm_mve_builtins.def
index 05930c9..550a67f 100644
--- a/gcc/config/arm/arm_mve_builtins.def
+++ b/gcc/config/arm/arm_mve_builtins.def
@@ -93,3 +93,123 @@ VAR3 (BINOP_UNONE_NONE_NONE, vcmpneq_s, v16qi, v8hi, v4si)
VAR3 (BINOP_UNONE_UNONE_UNONE, vcmpneq_u, v16qi, v8hi, v4si)
VAR3 (BINOP_NONE_NONE_NONE, vshlq_s, v16qi, v8hi, v4si)
VAR3 (BINOP_UNONE_UNONE_NONE, vshlq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vsubq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vsubq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vrmulhq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vrhaddq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vqsubq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vqsubq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vqaddq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vqaddq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vorrq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vornq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vmulq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vmulq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vmulltq_int_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vmullbq_int_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vmulhq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vmladavq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vminvq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vminq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vmaxvq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vmaxq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vhsubq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vhsubq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vhaddq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vhaddq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, veorq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vcmpneq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vcmphiq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vcmphiq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vcmpeqq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vcmpeqq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vcmpcsq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vcmpcsq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vcaddq_rot90_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vcaddq_rot270_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vbicq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vandq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vaddvq_p_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vaddvaq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vaddq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_UNONE, vabdq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_NONE, vshlq_r_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_NONE, vrshlq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_NONE, vrshlq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_NONE, vqshlq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_NONE, vqshlq_r_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_NONE, vqrshlq_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_NONE, vqrshlq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_NONE, vminavq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_NONE, vminaq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_NONE, vmaxavq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_NONE, vmaxaq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_NONE, vbrsrq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_IMM, vshlq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_IMM, vrshrq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_UNONE_IMM, vqshlq_n_u, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_NONE_NONE, vcmpneq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_NONE_NONE, vcmpltq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_NONE_NONE, vcmpltq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_NONE_NONE, vcmpleq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_NONE_NONE, vcmpleq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_NONE_NONE, vcmpgtq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_NONE_NONE, vcmpgtq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_NONE_NONE, vcmpgeq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_NONE_NONE, vcmpgeq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_NONE_NONE, vcmpeqq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_NONE_NONE, vcmpeqq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_UNONE_NONE_IMM, vqshluq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_UNONE, vaddvq_p_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vsubq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vsubq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vshlq_r_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vrshlq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vrshlq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vrmulhq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vrhaddq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vqsubq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vqsubq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vqshlq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vqshlq_r_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vqrshlq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vqrshlq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vqrdmulhq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vqrdmulhq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vqdmulhq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vqdmulhq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vqaddq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vqaddq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vorrq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vornq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vmulq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vmulq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vmulltq_int_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vmullbq_int_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vmulhq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vmlsdavxq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vmlsdavq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vmladavxq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vmladavq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vminvq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vminq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vmaxvq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vmaxq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vhsubq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vhsubq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vhcaddq_rot90_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vhcaddq_rot270_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vhaddq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vhaddq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, veorq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vcaddq_rot90_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vcaddq_rot270_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vbrsrq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vbicq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vandq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vaddvaq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vaddq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_NONE, vabdq_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_IMM, vshlq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_IMM, vrshrq_n_s, v16qi, v8hi, v4si)
+VAR3 (BINOP_NONE_NONE_IMM, vqshlq_n_s, v16qi, v8hi, v4si)
diff --git a/gcc/config/arm/constraints.md b/gcc/config/arm/constraints.md
index e3e202c..cdf75ab 100644
--- a/gcc/config/arm/constraints.md
+++ b/gcc/config/arm/constraints.md
@@ -34,7 +34,8 @@
;; in ARM/Thumb-2 state: Da, Db, Dc, Dd, Dn, DN, Dm, Dl, DL, Do, Dv, Dy, Di,
;; Dt, Dp, Dz, Tu
;; in Thumb-1 state: Pa, Pb, Pc, Pd, Pe
-;; in Thumb-2 state: Ha, Pj, PJ, Ps, Pt, Pu, Pv, Pw, Px, Py, Pz, Rd, Rf, Rb
+;; in Thumb-2 state: Ha, Pj, PJ, Ps, Pt, Pu, Pv, Pw, Px, Py, Pz, Rd, Rf, Rb, Ra,
+;; Rg
;; in all states: Pf, Pg
;; The following memory constraints have been used:
@@ -58,6 +59,11 @@
(and (match_code "const_int")
(match_test "TARGET_HAVE_MVE && ival >= 1 && ival <= 16")))
+(define_constraint "Ra"
+ "@internal In Thumb-2 state a constant in range 0 to 7"
+ (and (match_code "const_int")
+ (match_test "TARGET_HAVE_MVE && ival >= 0 && ival <= 7")))
+
(define_constraint "Rb"
"@internal In Thumb-2 state a constant in range 1 to 8"
(and (match_code "const_int")
@@ -68,6 +74,12 @@
(and (match_code "const_int")
(match_test "TARGET_HAVE_MVE && ival >= 1 && ival <= 32")))
+(define_constraint "Rg"
+ "@internal In Thumb-2 state a constant is one among 1, 2, 4 and 8"
+ (and (match_code "const_int")
+ (match_test "TARGET_HAVE_MVE && ((ival == 1) || (ival == 2)
+ || (ival == 4) || (ival == 8))")))
+
(define_register_constraint "t" "TARGET_32BIT ? VFP_LO_REGS : NO_REGS"
"The VFP registers @code{s0}-@code{s31}.")
diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
index 4ae608b..8e817b8 100644
--- a/gcc/config/arm/mve.md
+++ b/gcc/config/arm/mve.md
@@ -41,7 +41,32 @@
VCREATEQ_F VCVTQ_N_TO_F_S VCVTQ_N_TO_F_U VBRSRQ_N_F
VSUBQ_N_F VCREATEQ_U VCREATEQ_S VSHRQ_N_S VSHRQ_N_U
VCVTQ_N_FROM_F_S VCVTQ_N_FROM_F_U VADDLVQ_P_S
- VADDLVQ_P_U VCMPNEQ_U VCMPNEQ_S VSHLQ_S VSHLQ_U])
+ VADDLVQ_P_U VCMPNEQ_U VCMPNEQ_S VSHLQ_S VSHLQ_U VABDQ_S
+ VADDQ_N_S VADDVAQ_S VADDVQ_P_S VANDQ_S VBICQ_S
+ VBRSRQ_N_S VCADDQ_ROT270_S VCADDQ_ROT90_S VCMPEQQ_S
+ VCMPEQQ_N_S VCMPNEQ_N_S VEORQ_S VHADDQ_S VHADDQ_N_S
+ VHSUBQ_S VHSUBQ_N_S VMAXQ_S VMAXVQ_S VMINQ_S VMINVQ_S
+ VMLADAVQ_S VMULHQ_S VMULLBQ_INT_S VMULLTQ_INT_S VMULQ_S
+ VMULQ_N_S VORNQ_S VORRQ_S VQADDQ_S VQADDQ_N_S VQRSHLQ_S
+ VQRSHLQ_N_S VQSHLQ_S VQSHLQ_N_S VQSHLQ_R_S VQSUBQ_S
+ VQSUBQ_N_S VRHADDQ_S VRMULHQ_S VRSHLQ_S VRSHLQ_N_S
+ VRSHRQ_N_S VSHLQ_N_S VSHLQ_R_S VSUBQ_S VSUBQ_N_S
+ VABDQ_U VADDQ_N_U VADDVAQ_U VADDVQ_P_U VANDQ_U VBICQ_U
+ VBRSRQ_N_U VCADDQ_ROT270_U VCADDQ_ROT90_U VCMPEQQ_U
+ VCMPEQQ_N_U VCMPNEQ_N_U VEORQ_U VHADDQ_U VHADDQ_N_U
+ VHSUBQ_U VHSUBQ_N_U VMAXQ_U VMAXVQ_U VMINQ_U VMINVQ_U
+ VMLADAVQ_U VMULHQ_U VMULLBQ_INT_U VMULLTQ_INT_U VMULQ_U
+ VMULQ_N_U VORNQ_U VORRQ_U VQADDQ_U VQADDQ_N_U VQRSHLQ_U
+ VQRSHLQ_N_U VQSHLQ_U VQSHLQ_N_U VQSHLQ_R_U VQSUBQ_U
+ VQSUBQ_N_U VRHADDQ_U VRMULHQ_U VRSHLQ_U VRSHLQ_N_U
+ VRSHRQ_N_U VSHLQ_N_U VSHLQ_R_U VSUBQ_U VSUBQ_N_U
+ VCMPGEQ_N_S VCMPGEQ_S VCMPGTQ_N_S VCMPGTQ_S VCMPLEQ_N_S
+ VCMPLEQ_S VCMPLTQ_N_S VCMPLTQ_S VHCADDQ_ROT270_S
+ VHCADDQ_ROT90_S VMAXAQ_S VMAXAVQ_S VMINAQ_S VMINAVQ_S
+ VMLADAVXQ_S VMLSDAVQ_S VMLSDAVXQ_S VQDMULHQ_N_S
+ VQDMULHQ_S VQRDMULHQ_N_S VQRDMULHQ_S VQSHLUQ_N_S
+ VCMPCSQ_N_U VCMPCSQ_U VCMPHIQ_N_U VCMPHIQ_U VABDQ_M_S
+ VABDQ_M_U])
(define_mode_attr MVE_CNVT [(V8HI "V8HF") (V4SI "V4SF")
(V8HF "V8HI") (V4SF "V4SI")])
@@ -62,13 +87,46 @@
(VCREATEQ_U "u") (VCREATEQ_S "s") (VSHRQ_N_S "s")
(VSHRQ_N_U "u") (VCVTQ_N_FROM_F_S "s") (VSHLQ_U "u")
(VCVTQ_N_FROM_F_U "u") (VADDLVQ_P_S "s") (VSHLQ_S "s")
- (VADDLVQ_P_U "u") (VCMPNEQ_U "u") (VCMPNEQ_S "s")])
+ (VADDLVQ_P_U "u") (VCMPNEQ_U "u") (VCMPNEQ_S "s")
+ (VABDQ_M_S "s") (VABDQ_M_U "u") (VABDQ_S "s")
+ (VABDQ_U "u") (VADDQ_N_S "s") (VADDQ_N_U "u")
+ (VADDVQ_P_S "s") (VADDVQ_P_U "u") (VANDQ_S "s")
+ (VANDQ_U "u") (VBICQ_S "s") (VBICQ_U "u")
+ (VBRSRQ_N_S "s") (VBRSRQ_N_U "u") (VCADDQ_ROT270_S "s")
+ (VCADDQ_ROT270_U "u") (VCADDQ_ROT90_S "s")
+ (VCMPEQQ_S "s") (VCMPEQQ_U "u") (VCADDQ_ROT90_U "u")
+ (VCMPEQQ_N_S "s") (VCMPEQQ_N_U "u") (VCMPNEQ_N_S "s")
+ (VCMPNEQ_N_U "u") (VEORQ_S "s") (VEORQ_U "u")
+ (VHADDQ_N_S "s") (VHADDQ_N_U "u") (VHADDQ_S "s")
+ (VHADDQ_U "u") (VHSUBQ_N_S "s") (VHSUBQ_N_U "u")
+ (VHSUBQ_S "s") (VMAXQ_S "s") (VMAXQ_U "u") (VHSUBQ_U "u")
+ (VMAXVQ_S "s") (VMAXVQ_U "u") (VMINQ_S "s") (VMINQ_U "u")
+ (VMINVQ_S "s") (VMINVQ_U "u") (VMLADAVQ_S "s")
+ (VMLADAVQ_U "u") (VMULHQ_S "s") (VMULHQ_U "u")
+ (VMULLBQ_INT_S "s") (VMULLBQ_INT_U "u") (VQADDQ_S "s")
+ (VMULLTQ_INT_S "s") (VMULLTQ_INT_U "u") (VQADDQ_U "u")
+ (VMULQ_N_S "s") (VMULQ_N_U "u") (VMULQ_S "s")
+ (VMULQ_U "u") (VORNQ_S "s") (VORNQ_U "u") (VORRQ_S "s")
+ (VORRQ_U "u") (VQADDQ_N_S "s") (VQADDQ_N_U "u")
+ (VQRSHLQ_N_S "s") (VQRSHLQ_N_U "u") (VQRSHLQ_S "s")
+ (VQRSHLQ_U "u") (VQSHLQ_N_S "s") (VQSHLQ_N_U "u")
+ (VQSHLQ_R_S "s") (VQSHLQ_R_U "u") (VQSHLQ_S "s")
+ (VQSHLQ_U "u") (VQSUBQ_N_S "s") (VQSUBQ_N_U "u")
+ (VQSUBQ_S "s") (VQSUBQ_U "u") (VRHADDQ_S "s")
+ (VRHADDQ_U "u") (VRMULHQ_S "s") (VRMULHQ_U "u")
+ (VRSHLQ_N_S "s") (VRSHLQ_N_U "u") (VRSHLQ_S "s")
+ (VRSHLQ_U "u") (VRSHRQ_N_S "s") (VRSHRQ_N_U "u")
+ (VSHLQ_N_S "s") (VSHLQ_N_U "u") (VSHLQ_R_S "s")
+ (VSHLQ_R_U "u") (VSUBQ_N_S "s") (VSUBQ_N_U "u")
+ (VSUBQ_S "s") (VSUBQ_U "u") (VADDVAQ_S "s")
+ (VADDVAQ_U "u")])
(define_int_attr mode1 [(VCTP8Q "8") (VCTP16Q "16") (VCTP32Q "32")
(VCTP64Q "64")])
(define_mode_attr MVE_pred2 [(V16QI "mve_imm_8") (V8HI "mve_imm_16")
(V4SI "mve_imm_32")])
(define_mode_attr MVE_constraint2 [(V16QI "Rb") (V8HI "Rd") (V4SI "Rf")])
+(define_mode_attr MVE_LANES [(V16QI "16") (V8HI "8") (V4SI "4")])
(define_int_iterator VCVTQ_TO_F [VCVTQ_TO_F_S VCVTQ_TO_F_U])
(define_int_iterator VMVNQ_N [VMVNQ_N_U VMVNQ_N_S])
@@ -95,6 +153,54 @@
(define_int_iterator VADDLVQ_P [VADDLVQ_P_S VADDLVQ_P_U])
(define_int_iterator VCMPNEQ [VCMPNEQ_U VCMPNEQ_S])
(define_int_iterator VSHLQ [VSHLQ_S VSHLQ_U])
+(define_int_iterator VABDQ [VABDQ_S VABDQ_U])
+(define_int_iterator VADDQ_N [VADDQ_N_S VADDQ_N_U])
+(define_int_iterator VADDVAQ [VADDVAQ_S VADDVAQ_U])
+(define_int_iterator VADDVQ_P [VADDVQ_P_U VADDVQ_P_S])
+(define_int_iterator VANDQ [VANDQ_U VANDQ_S])
+(define_int_iterator VBICQ [VBICQ_S VBICQ_U])
+(define_int_iterator VBRSRQ_N [VBRSRQ_N_U VBRSRQ_N_S])
+(define_int_iterator VCADDQ_ROT270 [VCADDQ_ROT270_S VCADDQ_ROT270_U])
+(define_int_iterator VCADDQ_ROT90 [VCADDQ_ROT90_U VCADDQ_ROT90_S])
+(define_int_iterator VCMPEQQ [VCMPEQQ_U VCMPEQQ_S])
+(define_int_iterator VCMPEQQ_N [VCMPEQQ_N_S VCMPEQQ_N_U])
+(define_int_iterator VCMPNEQ_N [VCMPNEQ_N_U VCMPNEQ_N_S])
+(define_int_iterator VEORQ [VEORQ_U VEORQ_S])
+(define_int_iterator VHADDQ [VHADDQ_S VHADDQ_U])
+(define_int_iterator VHADDQ_N [VHADDQ_N_U VHADDQ_N_S])
+(define_int_iterator VHSUBQ [VHSUBQ_S VHSUBQ_U])
+(define_int_iterator VHSUBQ_N [VHSUBQ_N_U VHSUBQ_N_S])
+(define_int_iterator VMAXQ [VMAXQ_U VMAXQ_S])
+(define_int_iterator VMAXVQ [VMAXVQ_U VMAXVQ_S])
+(define_int_iterator VMINQ [VMINQ_S VMINQ_U])
+(define_int_iterator VMINVQ [VMINVQ_U VMINVQ_S])
+(define_int_iterator VMLADAVQ [VMLADAVQ_U VMLADAVQ_S])
+(define_int_iterator VMULHQ [VMULHQ_S VMULHQ_U])
+(define_int_iterator VMULLBQ_INT [VMULLBQ_INT_U VMULLBQ_INT_S])
+(define_int_iterator VMULLTQ_INT [VMULLTQ_INT_U VMULLTQ_INT_S])
+(define_int_iterator VMULQ [VMULQ_U VMULQ_S])
+(define_int_iterator VMULQ_N [VMULQ_N_U VMULQ_N_S])
+(define_int_iterator VORNQ [VORNQ_U VORNQ_S])
+(define_int_iterator VORRQ [VORRQ_S VORRQ_U])
+(define_int_iterator VQADDQ [VQADDQ_U VQADDQ_S])
+(define_int_iterator VQADDQ_N [VQADDQ_N_S VQADDQ_N_U])
+(define_int_iterator VQRSHLQ [VQRSHLQ_S VQRSHLQ_U])
+(define_int_iterator VQRSHLQ_N [VQRSHLQ_N_S VQRSHLQ_N_U])
+(define_int_iterator VQSHLQ [VQSHLQ_S VQSHLQ_U])
+(define_int_iterator VQSHLQ_N [VQSHLQ_N_S VQSHLQ_N_U])
+(define_int_iterator VQSHLQ_R [VQSHLQ_R_U VQSHLQ_R_S])
+(define_int_iterator VQSUBQ [VQSUBQ_U VQSUBQ_S])
+(define_int_iterator VQSUBQ_N [VQSUBQ_N_S VQSUBQ_N_U])
+(define_int_iterator VRHADDQ [VRHADDQ_S VRHADDQ_U])
+(define_int_iterator VRMULHQ [VRMULHQ_S VRMULHQ_U])
+(define_int_iterator VRSHLQ [VRSHLQ_S VRSHLQ_U])
+(define_int_iterator VRSHLQ_N [VRSHLQ_N_U VRSHLQ_N_S])
+(define_int_iterator VRSHRQ_N [VRSHRQ_N_S VRSHRQ_N_U])
+(define_int_iterator VSHLQ_N [VSHLQ_N_U VSHLQ_N_S])
+(define_int_iterator VSHLQ_R [VSHLQ_R_S VSHLQ_R_U])
+(define_int_iterator VSUBQ [VSUBQ_S VSUBQ_U])
+(define_int_iterator VSUBQ_N [VSUBQ_N_S VSUBQ_N_U])
+
(define_insn "*mve_mov<mode>"
[(set (match_operand:MVE_types 0 "nonimmediate_operand" "=w,w,r,w,w,r,w,Us")
@@ -856,3 +962,1098 @@
"vshl.<supf>%#<V_sz_elem>\t%q0, %q1, %q2"
[(set_attr "type" "mve_move")
])
+
+;;
+;; [vabdq_s, vabdq_u])
+;;
+(define_insn "mve_vabdq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VABDQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vabd.<supf>%#<V_sz_elem> %q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vaddq_n_s, vaddq_n_u])
+;;
+(define_insn "mve_vaddq_n_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VADDQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+ "vadd.i%#<V_sz_elem> %q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vaddvaq_s, vaddvaq_u])
+;;
+(define_insn "mve_vaddvaq_<supf><mode>"
+ [
+ (set (match_operand:SI 0 "s_register_operand" "=e")
+ (unspec:SI [(match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VADDVAQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vaddva.<supf>%#<V_sz_elem> %0, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vaddvq_p_u, vaddvq_p_s])
+;;
+(define_insn "mve_vaddvq_p_<supf><mode>"
+ [
+ (set (match_operand:SI 0 "s_register_operand" "=e")
+ (unspec:SI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:HI 2 "vpr_register_operand" "Up")]
+ VADDVQ_P))
+ ]
+ "TARGET_HAVE_MVE"
+ "vpst\;vaddvt.<supf>%#<V_sz_elem> %0, %q1"
+ [(set_attr "type" "mve_move")
+ (set_attr "length""8")])
+
+;;
+;; [vandq_u, vandq_s])
+;;
+(define_insn "mve_vandq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VANDQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vand %q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vbicq_s, vbicq_u])
+;;
+(define_insn "mve_vbicq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VBICQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vbic %q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vbrsrq_n_u, vbrsrq_n_s])
+;;
+(define_insn "mve_vbrsrq_n_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:SI 2 "s_register_operand" "r")]
+ VBRSRQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+ "vbrsr.%#<V_sz_elem> %q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcaddq_rot270_s, vcaddq_rot270_u])
+;;
+(define_insn "mve_vcaddq_rot270_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VCADDQ_ROT270))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcadd.i%#<V_sz_elem> %q0, %q1, %q2, #270"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcaddq_rot90_u, vcaddq_rot90_s])
+;;
+(define_insn "mve_vcaddq_rot90_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VCADDQ_ROT90))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcadd.i%#<V_sz_elem> %q0, %q1, %q2, #90"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmpcsq_n_u])
+;;
+(define_insn "mve_vcmpcsq_n_u<mode>"
+ [
+ (set (match_operand:HI 0 "vpr_register_operand" "=Up")
+ (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VCMPCSQ_N_U))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcmp.u%#<V_sz_elem> cs, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmpcsq_u])
+;;
+(define_insn "mve_vcmpcsq_u<mode>"
+ [
+ (set (match_operand:HI 0 "vpr_register_operand" "=Up")
+ (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VCMPCSQ_U))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcmp.u%#<V_sz_elem> cs, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmpeqq_n_s, vcmpeqq_n_u])
+;;
+(define_insn "mve_vcmpeqq_n_<supf><mode>"
+ [
+ (set (match_operand:HI 0 "vpr_register_operand" "=Up")
+ (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VCMPEQQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcmp.i%#<V_sz_elem> eq, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmpeqq_u, vcmpeqq_s])
+;;
+(define_insn "mve_vcmpeqq_<supf><mode>"
+ [
+ (set (match_operand:HI 0 "vpr_register_operand" "=Up")
+ (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VCMPEQQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcmp.i%#<V_sz_elem> eq, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmpgeq_n_s])
+;;
+(define_insn "mve_vcmpgeq_n_s<mode>"
+ [
+ (set (match_operand:HI 0 "vpr_register_operand" "=Up")
+ (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VCMPGEQ_N_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcmp.s%#<V_sz_elem> ge, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmpgeq_s])
+;;
+(define_insn "mve_vcmpgeq_s<mode>"
+ [
+ (set (match_operand:HI 0 "vpr_register_operand" "=Up")
+ (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VCMPGEQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcmp.s%#<V_sz_elem> ge, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmpgtq_n_s])
+;;
+(define_insn "mve_vcmpgtq_n_s<mode>"
+ [
+ (set (match_operand:HI 0 "vpr_register_operand" "=Up")
+ (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VCMPGTQ_N_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcmp.s%#<V_sz_elem> gt, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmpgtq_s])
+;;
+(define_insn "mve_vcmpgtq_s<mode>"
+ [
+ (set (match_operand:HI 0 "vpr_register_operand" "=Up")
+ (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VCMPGTQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcmp.s%#<V_sz_elem> gt, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmphiq_n_u])
+;;
+(define_insn "mve_vcmphiq_n_u<mode>"
+ [
+ (set (match_operand:HI 0 "vpr_register_operand" "=Up")
+ (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VCMPHIQ_N_U))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcmp.u%#<V_sz_elem> hi, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmphiq_u])
+;;
+(define_insn "mve_vcmphiq_u<mode>"
+ [
+ (set (match_operand:HI 0 "vpr_register_operand" "=Up")
+ (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VCMPHIQ_U))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcmp.u%#<V_sz_elem> hi, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmpleq_n_s])
+;;
+(define_insn "mve_vcmpleq_n_s<mode>"
+ [
+ (set (match_operand:HI 0 "vpr_register_operand" "=Up")
+ (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VCMPLEQ_N_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcmp.s%#<V_sz_elem> le, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmpleq_s])
+;;
+(define_insn "mve_vcmpleq_s<mode>"
+ [
+ (set (match_operand:HI 0 "vpr_register_operand" "=Up")
+ (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VCMPLEQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcmp.s%#<V_sz_elem> le, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmpltq_n_s])
+;;
+(define_insn "mve_vcmpltq_n_s<mode>"
+ [
+ (set (match_operand:HI 0 "vpr_register_operand" "=Up")
+ (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VCMPLTQ_N_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcmp.s%#<V_sz_elem> lt, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmpltq_s])
+;;
+(define_insn "mve_vcmpltq_s<mode>"
+ [
+ (set (match_operand:HI 0 "vpr_register_operand" "=Up")
+ (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VCMPLTQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcmp.s%#<V_sz_elem> lt, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vcmpneq_n_u, vcmpneq_n_s])
+;;
+(define_insn "mve_vcmpneq_n_<supf><mode>"
+ [
+ (set (match_operand:HI 0 "vpr_register_operand" "=Up")
+ (unspec:HI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VCMPNEQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+ "vcmp.i%#<V_sz_elem> ne, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [veorq_u, veorq_s])
+;;
+(define_insn "mve_veorq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VEORQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "veor %q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vhaddq_n_u, vhaddq_n_s])
+;;
+(define_insn "mve_vhaddq_n_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VHADDQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+ "vhadd.<supf>%#<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vhaddq_s, vhaddq_u])
+;;
+(define_insn "mve_vhaddq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VHADDQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vhadd.<supf>%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vhcaddq_rot270_s])
+;;
+(define_insn "mve_vhcaddq_rot270_s<mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VHCADDQ_ROT270_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vhcadd.s%#<V_sz_elem>\t%q0, %q1, %q2, #270"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vhcaddq_rot90_s])
+;;
+(define_insn "mve_vhcaddq_rot90_s<mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VHCADDQ_ROT90_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vhcadd.s%#<V_sz_elem>\t%q0, %q1, %q2, #90"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vhsubq_n_u, vhsubq_n_s])
+;;
+(define_insn "mve_vhsubq_n_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VHSUBQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+ "vhsub.<supf>%#<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vhsubq_s, vhsubq_u])
+;;
+(define_insn "mve_vhsubq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VHSUBQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vhsub.<supf>%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vmaxaq_s])
+;;
+(define_insn "mve_vmaxaq_s<mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VMAXAQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vmaxa.s%#<V_sz_elem> %q0, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vmaxavq_s])
+;;
+(define_insn "mve_vmaxavq_s<mode>"
+ [
+ (set (match_operand:<V_elem> 0 "s_register_operand" "=r")
+ (unspec:<V_elem> [(match_operand:<V_elem> 1 "s_register_operand" "0")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VMAXAVQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vmaxav.s%#<V_sz_elem>\t%0, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vmaxq_u, vmaxq_s])
+;;
+(define_insn "mve_vmaxq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VMAXQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vmax.<supf>%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vmaxvq_u, vmaxvq_s])
+;;
+(define_insn "mve_vmaxvq_<supf><mode>"
+ [
+ (set (match_operand:<V_elem> 0 "s_register_operand" "=r")
+ (unspec:<V_elem> [(match_operand:<V_elem> 1 "s_register_operand" "0")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VMAXVQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vmaxv.<supf>%#<V_sz_elem>\t%0, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vminaq_s])
+;;
+(define_insn "mve_vminaq_s<mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VMINAQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vmina.s%#<V_sz_elem>\t%q0, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vminavq_s])
+;;
+(define_insn "mve_vminavq_s<mode>"
+ [
+ (set (match_operand:<V_elem> 0 "s_register_operand" "=r")
+ (unspec:<V_elem> [(match_operand:<V_elem> 1 "s_register_operand" "0")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VMINAVQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vminav.s%#<V_sz_elem>\t%0, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vminq_s, vminq_u])
+;;
+(define_insn "mve_vminq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VMINQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vmin.<supf>%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vminvq_u, vminvq_s])
+;;
+(define_insn "mve_vminvq_<supf><mode>"
+ [
+ (set (match_operand:<V_elem> 0 "s_register_operand" "=r")
+ (unspec:<V_elem> [(match_operand:<V_elem> 1 "s_register_operand" "0")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VMINVQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vminv.<supf>%#<V_sz_elem>\t%0, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vmladavq_u, vmladavq_s])
+;;
+(define_insn "mve_vmladavq_<supf><mode>"
+ [
+ (set (match_operand:SI 0 "s_register_operand" "=e")
+ (unspec:SI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VMLADAVQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vmladav.<supf>%#<V_sz_elem>\t%0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vmladavxq_s])
+;;
+(define_insn "mve_vmladavxq_s<mode>"
+ [
+ (set (match_operand:SI 0 "s_register_operand" "=e")
+ (unspec:SI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VMLADAVXQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vmladavx.s%#<V_sz_elem>\t%0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vmlsdavq_s])
+;;
+(define_insn "mve_vmlsdavq_s<mode>"
+ [
+ (set (match_operand:SI 0 "s_register_operand" "=e")
+ (unspec:SI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VMLSDAVQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vmlsdav.s%#<V_sz_elem>\t%0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vmlsdavxq_s])
+;;
+(define_insn "mve_vmlsdavxq_s<mode>"
+ [
+ (set (match_operand:SI 0 "s_register_operand" "=e")
+ (unspec:SI [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VMLSDAVXQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vmlsdavx.s%#<V_sz_elem>\t%0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vmulhq_s, vmulhq_u])
+;;
+(define_insn "mve_vmulhq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VMULHQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vmulh.<supf>%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vmullbq_int_u, vmullbq_int_s])
+;;
+(define_insn "mve_vmullbq_int_<supf><mode>"
+ [
+ (set (match_operand:<V_double_width> 0 "s_register_operand" "=w")
+ (unspec:<V_double_width> [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VMULLBQ_INT))
+ ]
+ "TARGET_HAVE_MVE"
+ "vmullb.<supf>%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vmulltq_int_u, vmulltq_int_s])
+;;
+(define_insn "mve_vmulltq_int_<supf><mode>"
+ [
+ (set (match_operand:<V_double_width> 0 "s_register_operand" "=w")
+ (unspec:<V_double_width> [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VMULLTQ_INT))
+ ]
+ "TARGET_HAVE_MVE"
+ "vmullt.<supf>%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vmulq_n_u, vmulq_n_s])
+;;
+(define_insn "mve_vmulq_n_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VMULQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+ "vmul.i%#<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vmulq_u, vmulq_s])
+;;
+(define_insn "mve_vmulq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VMULQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vmul.i%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vornq_u, vornq_s])
+;;
+(define_insn "mve_vornq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VORNQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vorn %q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vorrq_s, vorrq_u])
+;;
+(define_insn "mve_vorrq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VORRQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vorr %q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vqaddq_n_s, vqaddq_n_u])
+;;
+(define_insn "mve_vqaddq_n_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VQADDQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+ "vqadd.<supf>%#<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vqaddq_u, vqaddq_s])
+;;
+(define_insn "mve_vqaddq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VQADDQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vqadd.<supf>%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vqdmulhq_n_s])
+;;
+(define_insn "mve_vqdmulhq_n_s<mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VQDMULHQ_N_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vqdmulh.s%#<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vqdmulhq_s])
+;;
+(define_insn "mve_vqdmulhq_s<mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VQDMULHQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vqdmulh.s%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vqrdmulhq_n_s])
+;;
+(define_insn "mve_vqrdmulhq_n_s<mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VQRDMULHQ_N_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vqrdmulh.s%#<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vqrdmulhq_s])
+;;
+(define_insn "mve_vqrdmulhq_s<mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VQRDMULHQ_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vqrdmulh.s%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vqrshlq_n_s, vqrshlq_n_u])
+;;
+(define_insn "mve_vqrshlq_n_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")]
+ VQRSHLQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+ "vqrshl.<supf>%#<V_sz_elem>\t%q0, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vqrshlq_s, vqrshlq_u])
+;;
+(define_insn "mve_vqrshlq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VQRSHLQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vqrshl.<supf>%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vqshlq_n_s, vqshlq_n_u])
+;;
+(define_insn "mve_vqshlq_n_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VQSHLQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+ "vqshl.<supf>%#<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vqshlq_r_u, vqshlq_r_s])
+;;
+(define_insn "mve_vqshlq_r_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")]
+ VQSHLQ_R))
+ ]
+ "TARGET_HAVE_MVE"
+ "vqshl.<supf>%#<V_sz_elem>\t%q0, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vqshlq_s, vqshlq_u])
+;;
+(define_insn "mve_vqshlq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VQSHLQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vqshl.<supf>%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vqshluq_n_s])
+;;
+(define_insn "mve_vqshluq_n_s<mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:SI 2 "mve_imm_7" "Ra")]
+ VQSHLUQ_N_S))
+ ]
+ "TARGET_HAVE_MVE"
+ "vqshlu.s%#<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vqsubq_n_s, vqsubq_n_u])
+;;
+(define_insn "mve_vqsubq_n_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VQSUBQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+ "vqsub.<supf>%#<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vqsubq_u, vqsubq_s])
+;;
+(define_insn "mve_vqsubq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VQSUBQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vqsub.<supf>%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vrhaddq_s, vrhaddq_u])
+;;
+(define_insn "mve_vrhaddq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VRHADDQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vrhadd.<supf>%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vrmulhq_s, vrmulhq_u])
+;;
+(define_insn "mve_vrmulhq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VRMULHQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vrmulh.<supf>%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vrshlq_n_u, vrshlq_n_s])
+;;
+(define_insn "mve_vrshlq_n_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")]
+ VRSHLQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+ "vrshl.<supf>%#<V_sz_elem>\t%q0, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vrshlq_s, vrshlq_u])
+;;
+(define_insn "mve_vrshlq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VRSHLQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vrshl.<supf>%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vrshrq_n_s, vrshrq_n_u])
+;;
+(define_insn "mve_vrshrq_n_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:SI 2 "<MVE_pred2>" "<MVE_constraint2>")]
+ VRSHRQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+ "vrshr.<supf>%#<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vshlq_n_u, vshlq_n_s])
+;;
+(define_insn "mve_vshlq_n_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ VSHLQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+ "vshl.<supf>%#<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vshlq_r_s, vshlq_r_u])
+;;
+(define_insn "mve_vshlq_r_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")]
+ VSHLQ_R))
+ ]
+ "TARGET_HAVE_MVE"
+ "vshl.<supf>%#<V_sz_elem>\t%q0, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vsubq_n_s, vsubq_n_u])
+;;
+(define_insn "mve_vsubq_n_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:<V_elem> 2 "s_register_operand" "r")]
+ VSUBQ_N))
+ ]
+ "TARGET_HAVE_MVE"
+ "vsub.i%#<V_sz_elem>\t%q0, %q1, %2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vsubq_s, vsubq_u])
+;;
+(define_insn "mve_vsubq_<supf><mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")]
+ VSUBQ))
+ ]
+ "TARGET_HAVE_MVE"
+ "vsub.i%#<V_sz_elem>\t%q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
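Most of the new define_insn patterns above expand to a single MVE instruction; the predicated vaddvq_p variant is the exception, emitting a vpst/vaddvt pair, which is why its "length" attribute is set to 8. A hedged usage sketch (made-up function name, MVE-enabled toolchain assumed):

#include "arm_mve.h"

uint32_t
sum_active (uint8x16_t a, mve_pred16_t p)
{
  /* Maps to __arm_vaddvq_p_u8 and the mve_vaddvq_p_<supf><mode> pattern;
     the expected output is "vpst" followed by "vaddvt.u8".  */
  return vaddvq_p_u8 (a, p);
}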
diff --git a/gcc/config/arm/predicates.md b/gcc/config/arm/predicates.md
index 2f5d5a7..9c9a84b 100644
--- a/gcc/config/arm/predicates.md
+++ b/gcc/config/arm/predicates.md
@@ -35,6 +35,10 @@
(define_predicate "mve_imm_16"
(match_test "satisfies_constraint_Rd (op)"))
+;; True for immediates in the range of 0 to 7 for MVE.
+(define_predicate "mve_imm_7"
+ (match_test "satisfies_constraint_Ra (op)"))
+
;; True for immediates in the range of 1 to 8 for MVE.
(define_predicate "mve_imm_8"
(match_test "satisfies_constraint_Rb (op)"))
@@ -43,6 +47,10 @@
(define_predicate "mve_imm_32"
(match_test "satisfies_constraint_Rf (op)"))
+;; True if the immediate is one of 1, 2, 4 or 8 for MVE.
+(define_predicate "mve_imm_selective_upto_8"
+ (match_test "satisfies_constraint_Rg (op)"))
+
; Predicate for stack protector guard's address in
; stack_protect_combined_set_insn and stack_protect_combined_test_insn patterns
(define_predicate "guard_addr_operand"
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 83de77b..0635e97 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -2,6 +2,371 @@
Mihail Ionescu <mihail.ionescu@arm.com>
Srinath Parvathaneni <srinath.parvathaneni@arm.com>
+ * gcc.target/arm/mve/intrinsics/vabdq_s16.c: New test.
+ * gcc.target/arm/mve/intrinsics/vabdq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabdq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabdq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabdq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabdq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddvaq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddvaq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddvaq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddvaq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddvaq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddvaq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddvq_p_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddvq_p_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddvq_p_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddvq_p_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddvq_p_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddvq_p_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vandq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vandq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vandq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vandq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vandq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vandq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbicq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbicq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbicq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbicq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbicq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbicq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbrsrq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbrsrq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbrsrq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbrsrq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbrsrq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbrsrq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpcsq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpcsq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpcsq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpcsq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpcsq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpcsq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpeqq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpeqq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpeqq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpeqq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpeqq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpeqq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpeqq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpgeq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpgeq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpgeq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpgeq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpgtq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpgtq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpgtq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpgtq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmphiq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmphiq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmphiq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmphiq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmphiq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmphiq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpleq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpleq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpleq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpleq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpleq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpleq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpltq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpltq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpltq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpltq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpltq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpltq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpneq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpneq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpneq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpneq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpneq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmpneq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/veorq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/veorq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/veorq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/veorq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/veorq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/veorq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxaq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxaq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxaq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxavq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxavq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxavq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxvq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxvq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxvq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxvq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxvq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxvq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminaq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminaq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminaq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminavq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminavq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminavq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminvq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminvq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminvq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminvq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminvq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminvq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmladavq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmladavq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmladavq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmladavq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmladavq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmladavq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmladavxq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmladavxq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmladavxq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmlsdavq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmlsdavq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmlsdavq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmlsdavxq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmlsdavxq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmlsdavxq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulhq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulhq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulhq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulhq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulhq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulhq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmullbq_int_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmullbq_int_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmullbq_int_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmullbq_int_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmullbq_int_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmullbq_int_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulltq_int_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulltq_int_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulltq_int_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulltq_int_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulltq_int_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulltq_int_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vornq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vornq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vornq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vornq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vornq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vornq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vorrq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vorrq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vorrq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vorrq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vorrq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vorrq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqaddq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqaddq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqaddq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqaddq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqaddq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqaddq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqaddq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqaddq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqaddq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqaddq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqaddq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqaddq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqdmulhq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqdmulhq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqdmulhq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqdmulhq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqdmulhq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqdmulhq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrdmulhq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrdmulhq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrdmulhq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrshlq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrshlq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrshlq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrshlq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrshlq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrshlq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrshlq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrshlq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrshlq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrshlq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrshlq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqrshlq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_r_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_r_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_r_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_r_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_r_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_r_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshlq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshluq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshluq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqshluq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqsubq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqsubq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqsubq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqsubq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqsubq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqsubq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqsubq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqsubq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqsubq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqsubq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqsubq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vqsubq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrhaddq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrhaddq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrhaddq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrhaddq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrhaddq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrhaddq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrmulhq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrmulhq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrmulhq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrmulhq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrmulhq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrmulhq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshrq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshrq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshrq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshrq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshrq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshrq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_r_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_r_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_r_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_r_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_r_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_r_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_u8.c: Likewise.
+
+2020-03-17  Andre Vieira  <andre.simoesdiasvieira@arm.com>
+	    Mihail Ionescu  <mihail.ionescu@arm.com>
+	    Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
+
* gcc.target/arm/mve/intrinsics/vaddlvq_p_s32.c: New test.
* gcc.target/arm/mve/intrinsics/vaddlvq_p_u32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vcmpneq_s16.c: Likewise.
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s16.c
new file mode 100644
index 0000000..17b45e7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vabdq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vabd.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vabdq (a, b);
+}
+
+/* { dg-final { scan-assembler "vabd.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s32.c
new file mode 100644
index 0000000..9776c7c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vabdq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vabd.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vabdq (a, b);
+}
+
+/* { dg-final { scan-assembler "vabd.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s8.c
new file mode 100644
index 0000000..9528a80
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vabdq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vabd.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vabdq (a, b);
+}
+
+/* { dg-final { scan-assembler "vabd.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u16.c
new file mode 100644
index 0000000..0005623
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vabdq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vabd.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vabdq (a, b);
+}
+
+/* { dg-final { scan-assembler "vabd.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u32.c
new file mode 100644
index 0000000..a89bea1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vabdq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vabd.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vabdq (a, b);
+}
+
+/* { dg-final { scan-assembler "vabd.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u8.c
new file mode 100644
index 0000000..d724fee
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vabdq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vabd.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vabdq (a, b);
+}
+
+/* { dg-final { scan-assembler "vabd.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s16.c
new file mode 100644
index 0000000..55091b4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16_t b)
+{
+ return vaddq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16_t b)
+{
+ return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s32.c
new file mode 100644
index 0000000..0b83adf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b)
+{
+ return vaddq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s8.c
new file mode 100644
index 0000000..250807e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8_t b)
+{
+ return vaddq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8_t b)
+{
+ return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u16.c
new file mode 100644
index 0000000..c0af7e3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16_t b)
+{
+ return vaddq_n_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16_t b)
+{
+ return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u32.c
new file mode 100644
index 0000000..9ad1da0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32_t b)
+{
+ return vaddq_n_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32_t b)
+{
+ return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u8.c
new file mode 100644
index 0000000..3a36041
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8_t b)
+{
+ return vaddq_n_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8_t b)
+{
+ return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s16.c
new file mode 100644
index 0000000..b4b0011
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int32_t a, int16x8_t b)
+{
+ return vaddvaq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vaddva.s16" } } */
+
+int32_t
+foo1 (int32_t a, int16x8_t b)
+{
+ return vaddvaq (a, b);
+}
+
+/* { dg-final { scan-assembler "vaddva.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s32.c
new file mode 100644
index 0000000..eba7167
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int32_t a, int32x4_t b)
+{
+ return vaddvaq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vaddva.s32" } } */
+
+int32_t
+foo1 (int32_t a, int32x4_t b)
+{
+ return vaddvaq (a, b);
+}
+
+/* { dg-final { scan-assembler "vaddva.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s8.c
new file mode 100644
index 0000000..2471c99
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int32_t a, int8x16_t b)
+{
+ return vaddvaq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vaddva.s8" } } */
+
+int32_t
+foo1 (int32_t a, int8x16_t b)
+{
+ return vaddvaq (a, b);
+}
+
+/* { dg-final { scan-assembler "vaddva.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u16.c
new file mode 100644
index 0000000..0d97fd0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint32_t a, uint16x8_t b)
+{
+ return vaddvaq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vaddva.u16" } } */
+
+uint32_t
+foo1 (uint32_t a, uint16x8_t b)
+{
+ return vaddvaq (a, b);
+}
+
+/* { dg-final { scan-assembler "vaddva.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u32.c
new file mode 100644
index 0000000..4e630c3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint32_t a, uint32x4_t b)
+{
+ return vaddvaq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vaddva.u32" } } */
+
+uint32_t
+foo1 (uint32_t a, uint32x4_t b)
+{
+ return vaddvaq (a, b);
+}
+
+/* { dg-final { scan-assembler "vaddva.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u8.c
new file mode 100644
index 0000000..e7c5137
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvaq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint32_t a, uint8x16_t b)
+{
+ return vaddvaq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vaddva.u8" } } */
+
+uint32_t
+foo1 (uint32_t a, uint8x16_t b)
+{
+ return vaddvaq (a, b);
+}
+
+/* { dg-final { scan-assembler "vaddva.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s16.c
new file mode 100644
index 0000000..b84d0d2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vaddvq_p_s16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vaddvt.s16" } } */
+
+int32_t
+foo1 (int16x8_t a, mve_pred16_t p)
+{
+ return vaddvq_p (a, p);
+}
+
+/* { dg-final { scan-assembler "vaddvt.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s32.c
new file mode 100644
index 0000000..0983f4e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int32x4_t a, mve_pred16_t p)
+{
+ return vaddvq_p_s32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vaddvt.s32" } } */
+
+int32_t
+foo1 (int32x4_t a, mve_pred16_t p)
+{
+ return vaddvq_p (a, p);
+}
+
+/* { dg-final { scan-assembler "vaddvt.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s8.c
new file mode 100644
index 0000000..aef7f67
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int8x16_t a, mve_pred16_t p)
+{
+ return vaddvq_p_s8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vaddvt.s8" } } */
+
+int32_t
+foo1 (int8x16_t a, mve_pred16_t p)
+{
+ return vaddvq_p (a, p);
+}
+
+/* { dg-final { scan-assembler "vaddvt.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u16.c
new file mode 100644
index 0000000..d4c214e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint16x8_t a, mve_pred16_t p)
+{
+ return vaddvq_p_u16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vaddvt.u16" } } */
+
+uint32_t
+foo1 (uint16x8_t a, mve_pred16_t p)
+{
+ return vaddvq_p (a, p);
+}
+
+/* { dg-final { scan-assembler "vaddvt.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u32.c
new file mode 100644
index 0000000..604ca6a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint32x4_t a, mve_pred16_t p)
+{
+ return vaddvq_p_u32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vaddvt.u32" } } */
+
+uint32_t
+foo1 (uint32x4_t a, mve_pred16_t p)
+{
+ return vaddvq_p (a, p);
+}
+
+/* { dg-final { scan-assembler "vaddvt.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u8.c
new file mode 100644
index 0000000..9caeea9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddvq_p_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint8x16_t a, mve_pred16_t p)
+{
+ return vaddvq_p_u8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vaddvt.u8" } } */
+
+uint32_t
+foo1 (uint8x16_t a, mve_pred16_t p)
+{
+ return vaddvq_p (a, p);
+}
+
+/* { dg-final { scan-assembler "vaddvt.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s16.c
new file mode 100644
index 0000000..ae989dc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vandq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vand" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vandq (a, b);
+}
+
+/* { dg-final { scan-assembler "vand" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s32.c
new file mode 100644
index 0000000..4106cfb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vandq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vand" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vandq (a, b);
+}
+
+/* { dg-final { scan-assembler "vand" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s8.c
new file mode 100644
index 0000000..ed78186
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vandq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vand" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vandq (a, b);
+}
+
+/* { dg-final { scan-assembler "vand" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u16.c
new file mode 100644
index 0000000..842b829
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vandq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vand" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vandq (a, b);
+}
+
+/* { dg-final { scan-assembler "vand" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u32.c
new file mode 100644
index 0000000..3998cf0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vandq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vand" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vandq (a, b);
+}
+
+/* { dg-final { scan-assembler "vand" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u8.c
new file mode 100644
index 0000000..45846bc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vandq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vand" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vandq (a, b);
+}
+
+/* { dg-final { scan-assembler "vand" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s16.c
new file mode 100644
index 0000000..51daf20
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vbicq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vbic" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vbicq (a, b);
+}
+
+/* { dg-final { scan-assembler "vbic" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s32.c
new file mode 100644
index 0000000..2846494
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vbicq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vbic" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vbicq (a, b);
+}
+
+/* { dg-final { scan-assembler "vbic" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s8.c
new file mode 100644
index 0000000..ac4753c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vbicq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vbic" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vbicq (a, b);
+}
+
+/* { dg-final { scan-assembler "vbic" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u16.c
new file mode 100644
index 0000000..3bf72ae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vbicq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vbic" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vbicq (a, b);
+}
+
+/* { dg-final { scan-assembler "vbic" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u32.c
new file mode 100644
index 0000000..7a81637
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vbicq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vbic" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vbicq (a, b);
+}
+
+/* { dg-final { scan-assembler "vbic" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u8.c
new file mode 100644
index 0000000..d834672
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vbicq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vbic" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vbicq (a, b);
+}
+
+/* { dg-final { scan-assembler "vbic" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s16.c
new file mode 100644
index 0000000..f8f8263
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int32_t b)
+{
+ return vbrsrq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vbrsr.16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int32_t b)
+{
+ return vbrsrq (a, b);
+}
+
+/* { dg-final { scan-assembler "vbrsr.16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s32.c
new file mode 100644
index 0000000..3ad6933
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b)
+{
+ return vbrsrq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vbrsr.32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vbrsrq (a, b);
+}
+
+/* { dg-final { scan-assembler "vbrsr.32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s8.c
new file mode 100644
index 0000000..dd40854
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int32_t b)
+{
+ return vbrsrq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vbrsr.8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int32_t b)
+{
+ return vbrsrq (a, b);
+}
+
+/* { dg-final { scan-assembler "vbrsr.8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u16.c
new file mode 100644
index 0000000..227847a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, int32_t b)
+{
+ return vbrsrq_n_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vbrsr.16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, int32_t b)
+{
+ return vbrsrq (a, b);
+}
+
+/* { dg-final { scan-assembler "vbrsr.16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u32.c
new file mode 100644
index 0000000..5ee194c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, int32_t b)
+{
+ return vbrsrq_n_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vbrsr.32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, int32_t b)
+{
+ return vbrsrq (a, b);
+}
+
+/* { dg-final { scan-assembler "vbrsr.32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u8.c
new file mode 100644
index 0000000..3b828a9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, int32_t b)
+{
+ return vbrsrq_n_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vbrsr.8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, int32_t b)
+{
+ return vbrsrq (a, b);
+}
+
+/* { dg-final { scan-assembler "vbrsr.8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s16.c
new file mode 100644
index 0000000..c6490ea
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vcaddq_rot270_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vcaddq_rot270 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s32.c
new file mode 100644
index 0000000..9a5a338
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vcaddq_rot270_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vcaddq_rot270 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s8.c
new file mode 100644
index 0000000..d23a2aa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vcaddq_rot270_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vcaddq_rot270 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u16.c
new file mode 100644
index 0000000..c73aff1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vcaddq_rot270_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vcaddq_rot270 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u32.c
new file mode 100644
index 0000000..b076c56
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vcaddq_rot270_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vcaddq_rot270 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u8.c
new file mode 100644
index 0000000..7805b39
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vcaddq_rot270_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vcaddq_rot270 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s16.c
new file mode 100644
index 0000000..4386ca9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vcaddq_rot90_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vcaddq_rot90 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s32.c
new file mode 100644
index 0000000..5255f4e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vcaddq_rot90_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vcaddq_rot90 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s8.c
new file mode 100644
index 0000000..c7f64ce
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vcaddq_rot90_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vcaddq_rot90 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u16.c
new file mode 100644
index 0000000..c1a06d6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vcaddq_rot90_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vcaddq_rot90 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u32.c
new file mode 100644
index 0000000..9f612a4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vcaddq_rot90_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vcaddq_rot90 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u8.c
new file mode 100644
index 0000000..e19076b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vcaddq_rot90_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vcaddq_rot90 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcadd.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u16.c
new file mode 100644
index 0000000..caa2ce4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint16x8_t a, uint16_t b)
+{
+ return vcmpcsq_n_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u16" } } */
+
+mve_pred16_t
+foo1 (uint16x8_t a, uint16_t b)
+{
+ return vcmpcsq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u32.c
new file mode 100644
index 0000000..cc3f4d9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint32x4_t a, uint32_t b)
+{
+ return vcmpcsq_n_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u32" } } */
+
+mve_pred16_t
+foo1 (uint32x4_t a, uint32_t b)
+{
+ return vcmpcsq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u8.c
new file mode 100644
index 0000000..768b60d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint8x16_t a, uint8_t b)
+{
+ return vcmpcsq_n_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u8" } } */
+
+mve_pred16_t
+foo1 (uint8x16_t a, uint8_t b)
+{
+ return vcmpcsq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u16.c
new file mode 100644
index 0000000..a474ba6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vcmpcsq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u16" } } */
+
+mve_pred16_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vcmpcsq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u32.c
new file mode 100644
index 0000000..2a7fae5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vcmpcsq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u32" } } */
+
+mve_pred16_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vcmpcsq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u8.c
new file mode 100644
index 0000000..b370a91
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpcsq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vcmpcsq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u8" } } */
+
+mve_pred16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vcmpcsq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s16.c
new file mode 100644
index 0000000..ec5ed4a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int16x8_t a, int16_t b)
+{
+ return vcmpeqq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i16" } } */
+
+mve_pred16_t
+foo1 (int16x8_t a, int16_t b)
+{
+ return vcmpeqq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s32.c
new file mode 100644
index 0000000..02262e7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int32x4_t a, int32_t b)
+{
+ return vcmpeqq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i32" } } */
+
+mve_pred16_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vcmpeqq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s8.c
new file mode 100644
index 0000000..ef91aac
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int8x16_t a, int8_t b)
+{
+ return vcmpeqq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i8" } } */
+
+mve_pred16_t
+foo1 (int8x16_t a, int8_t b)
+{
+ return vcmpeqq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u16.c
new file mode 100644
index 0000000..4f776cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint16x8_t a, uint16_t b)
+{
+ return vcmpeqq_n_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i16" } } */
+
+mve_pred16_t
+foo1 (uint16x8_t a, uint16_t b)
+{
+ return vcmpeqq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u32.c
new file mode 100644
index 0000000..ba5ce37
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint32x4_t a, uint32_t b)
+{
+ return vcmpeqq_n_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i32" } } */
+
+mve_pred16_t
+foo1 (uint32x4_t a, uint32_t b)
+{
+ return vcmpeqq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u8.c
new file mode 100644
index 0000000..b6ef1d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint8x16_t a, uint8_t b)
+{
+ return vcmpeqq_n_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i8" } } */
+
+mve_pred16_t
+foo1 (uint8x16_t a, uint8_t b)
+{
+ return vcmpeqq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s16.c
new file mode 100644
index 0000000..94f6e68
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vcmpeqq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i16" } } */
+
+mve_pred16_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vcmpeqq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s32.c
new file mode 100644
index 0000000..20ebd7f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vcmpeqq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i32" } } */
+
+mve_pred16_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vcmpeqq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s8.c
new file mode 100644
index 0000000..a893103
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vcmpeqq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i8" } } */
+
+mve_pred16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vcmpeqq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u16.c
new file mode 100644
index 0000000..ad1c08f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vcmpeqq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i16" } } */
+
+mve_pred16_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vcmpeqq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u32.c
new file mode 100644
index 0000000..2faa789
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vcmpeqq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i32" } } */
+
+mve_pred16_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vcmpeqq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u8.c
new file mode 100644
index 0000000..742e24f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpeqq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vcmpeqq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i8" } } */
+
+mve_pred16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vcmpeqq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s16.c
new file mode 100644
index 0000000..2b3bb69
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int16x8_t a, int16_t b)
+{
+ return vcmpgeq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s16" } } */
+
+mve_pred16_t
+foo1 (int16x8_t a, int16_t b)
+{
+ return vcmpgeq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s32.c
new file mode 100644
index 0000000..1cfde96
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int32x4_t a, int32_t b)
+{
+ return vcmpgeq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s32" } } */
+
+mve_pred16_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vcmpgeq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s8.c
new file mode 100644
index 0000000..269f2e6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int8x16_t a, int8_t b)
+{
+ return vcmpgeq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s8" } } */
+
+mve_pred16_t
+foo1 (int8x16_t a, int8_t b)
+{
+ return vcmpgeq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s16.c
new file mode 100644
index 0000000..1423d38
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vcmpgeq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s16" } } */
+
+mve_pred16_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vcmpgeq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s32.c
new file mode 100644
index 0000000..49b2143
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vcmpgeq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s32" } } */
+
+mve_pred16_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vcmpgeq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s8.c
new file mode 100644
index 0000000..fe0f602
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgeq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vcmpgeq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s8" } } */
+
+mve_pred16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vcmpgeq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s16.c
new file mode 100644
index 0000000..a6fdbf7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int16x8_t a, int16_t b)
+{
+ return vcmpgtq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s16" } } */
+
+mve_pred16_t
+foo1 (int16x8_t a, int16_t b)
+{
+ return vcmpgtq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s32.c
new file mode 100644
index 0000000..24cb5ec
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int32x4_t a, int32_t b)
+{
+ return vcmpgtq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s32" } } */
+
+mve_pred16_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vcmpgtq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s8.c
new file mode 100644
index 0000000..a9fc907
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int8x16_t a, int8_t b)
+{
+ return vcmpgtq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s8" } } */
+
+mve_pred16_t
+foo1 (int8x16_t a, int8_t b)
+{
+ return vcmpgtq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s16.c
new file mode 100644
index 0000000..3e1f84f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vcmpgtq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s16" } } */
+
+mve_pred16_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vcmpgtq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s32.c
new file mode 100644
index 0000000..70c8987
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vcmpgtq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s32" } } */
+
+mve_pred16_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vcmpgtq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s8.c
new file mode 100644
index 0000000..a731716
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpgtq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vcmpgtq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s8" } } */
+
+mve_pred16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vcmpgtq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u16.c
new file mode 100644
index 0000000..98fa9e5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint16x8_t a, uint16_t b)
+{
+ return vcmphiq_n_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u16" } } */
+
+mve_pred16_t
+foo1 (uint16x8_t a, uint16_t b)
+{
+ return vcmphiq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u32.c
new file mode 100644
index 0000000..f3dc573
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint32x4_t a, uint32_t b)
+{
+ return vcmphiq_n_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u32" } } */
+
+mve_pred16_t
+foo1 (uint32x4_t a, uint32_t b)
+{
+ return vcmphiq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u8.c
new file mode 100644
index 0000000..84aefb3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint8x16_t a, uint8_t b)
+{
+ return vcmphiq_n_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u8" } } */
+
+mve_pred16_t
+foo1 (uint8x16_t a, uint8_t b)
+{
+ return vcmphiq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u16.c
new file mode 100644
index 0000000..fda860f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vcmphiq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u16" } } */
+
+mve_pred16_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vcmphiq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u32.c
new file mode 100644
index 0000000..c0d0f3f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vcmphiq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u32" } } */
+
+mve_pred16_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vcmphiq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u8.c
new file mode 100644
index 0000000..938abc3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmphiq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vcmphiq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u8" } } */
+
+mve_pred16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vcmphiq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s16.c
new file mode 100644
index 0000000..8a38936
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int16x8_t a, int16_t b)
+{
+ return vcmpleq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s16" } } */
+
+mve_pred16_t
+foo1 (int16x8_t a, int16_t b)
+{
+ return vcmpleq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s32.c
new file mode 100644
index 0000000..e8c489e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int32x4_t a, int32_t b)
+{
+ return vcmpleq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s32" } } */
+
+mve_pred16_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vcmpleq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s8.c
new file mode 100644
index 0000000..0816bbd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int8x16_t a, int8_t b)
+{
+ return vcmpleq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s8" } } */
+
+mve_pred16_t
+foo1 (int8x16_t a, int8_t b)
+{
+ return vcmpleq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s16.c
new file mode 100644
index 0000000..80f270f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vcmpleq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s16" } } */
+
+mve_pred16_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vcmpleq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s32.c
new file mode 100644
index 0000000..5148d2a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vcmpleq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s32" } } */
+
+mve_pred16_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vcmpleq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s8.c
new file mode 100644
index 0000000..24f9f05
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpleq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vcmpleq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s8" } } */
+
+mve_pred16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vcmpleq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s16.c
new file mode 100644
index 0000000..a283416
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int16x8_t a, int16_t b)
+{
+ return vcmpltq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s16" } } */
+
+mve_pred16_t
+foo1 (int16x8_t a, int16_t b)
+{
+ return vcmpltq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s32.c
new file mode 100644
index 0000000..cc2427c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int32x4_t a, int32_t b)
+{
+ return vcmpltq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s32" } } */
+
+mve_pred16_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vcmpltq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s8.c
new file mode 100644
index 0000000..5f4859b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int8x16_t a, int8_t b)
+{
+ return vcmpltq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s8" } } */
+
+mve_pred16_t
+foo1 (int8x16_t a, int8_t b)
+{
+ return vcmpltq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s16.c
new file mode 100644
index 0000000..c3e3f34
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vcmpltq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s16" } } */
+
+mve_pred16_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vcmpltq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s32.c
new file mode 100644
index 0000000..c8f82fc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vcmpltq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s32" } } */
+
+mve_pred16_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vcmpltq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s8.c
new file mode 100644
index 0000000..5dbb4f9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpltq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vcmpltq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s8" } } */
+
+mve_pred16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vcmpltq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s16.c
new file mode 100644
index 0000000..e1d3b18
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int16x8_t a, int16_t b)
+{
+ return vcmpneq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i16" } } */
+
+mve_pred16_t
+foo1 (int16x8_t a, int16_t b)
+{
+ return vcmpneq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s32.c
new file mode 100644
index 0000000..538b9d2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int32x4_t a, int32_t b)
+{
+ return vcmpneq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i32" } } */
+
+mve_pred16_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vcmpneq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s8.c
new file mode 100644
index 0000000..7417716
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (int8x16_t a, int8_t b)
+{
+ return vcmpneq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i8" } } */
+
+mve_pred16_t
+foo1 (int8x16_t a, int8_t b)
+{
+ return vcmpneq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u16.c
new file mode 100644
index 0000000..31eb156
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint16x8_t a, uint16_t b)
+{
+ return vcmpneq_n_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i16" } } */
+
+mve_pred16_t
+foo1 (uint16x8_t a, uint16_t b)
+{
+ return vcmpneq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u32.c
new file mode 100644
index 0000000..10c328f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint32x4_t a, uint32_t b)
+{
+ return vcmpneq_n_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i32" } } */
+
+mve_pred16_t
+foo1 (uint32x4_t a, uint32_t b)
+{
+ return vcmpneq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u8.c
new file mode 100644
index 0000000..2433f43
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmpneq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+mve_pred16_t
+foo (uint8x16_t a, uint8_t b)
+{
+ return vcmpneq_n_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i8" } } */
+
+mve_pred16_t
+foo1 (uint8x16_t a, uint8_t b)
+{
+ return vcmpneq (a, b);
+}
+
+/* { dg-final { scan-assembler "vcmp.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s16.c
new file mode 100644
index 0000000..30b7e3a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return veorq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "veor" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return veorq (a, b);
+}
+
+/* { dg-final { scan-assembler "veor" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s32.c
new file mode 100644
index 0000000..ec14090
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return veorq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "veor" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return veorq (a, b);
+}
+
+/* { dg-final { scan-assembler "veor" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s8.c
new file mode 100644
index 0000000..b0e02b1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return veorq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "veor" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return veorq (a, b);
+}
+
+/* { dg-final { scan-assembler "veor" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u16.c
new file mode 100644
index 0000000..2985db8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return veorq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "veor" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return veorq (a, b);
+}
+
+/* { dg-final { scan-assembler "veor" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u32.c
new file mode 100644
index 0000000..4c1dae1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return veorq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "veor" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return veorq (a, b);
+}
+
+/* { dg-final { scan-assembler "veor" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u8.c
new file mode 100644
index 0000000..cc64128
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return veorq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "veor" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return veorq (a, b);
+}
+
+/* { dg-final { scan-assembler "veor" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s16.c
new file mode 100644
index 0000000..75977c4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16_t b)
+{
+ return vhaddq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16_t b)
+{
+ return vhaddq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s32.c
new file mode 100644
index 0000000..0bfe84d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b)
+{
+ return vhaddq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vhaddq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s8.c
new file mode 100644
index 0000000..4901b91
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8_t b)
+{
+ return vhaddq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8_t b)
+{
+ return vhaddq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u16.c
new file mode 100644
index 0000000..88b1e2e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16_t b)
+{
+ return vhaddq_n_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16_t b)
+{
+ return vhaddq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u32.c
new file mode 100644
index 0000000..ad492e5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32_t b)
+{
+ return vhaddq_n_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32_t b)
+{
+ return vhaddq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u8.c
new file mode 100644
index 0000000..328f09a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8_t b)
+{
+ return vhaddq_n_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8_t b)
+{
+ return vhaddq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s16.c
new file mode 100644
index 0000000..fe7b305
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vhaddq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vhaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s32.c
new file mode 100644
index 0000000..ceda836
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vhaddq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vhaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s8.c
new file mode 100644
index 0000000..e117f0c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vhaddq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vhaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u16.c
new file mode 100644
index 0000000..003e59b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vhaddq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vhaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u32.c
new file mode 100644
index 0000000..f91044d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vhaddq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vhaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u8.c
new file mode 100644
index 0000000..975ffa5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vhaddq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vhaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vhadd.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s16.c
new file mode 100644
index 0000000..cea5485
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vhcaddq_rot270_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhcadd.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vhcaddq_rot270 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhcadd.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s32.c
new file mode 100644
index 0000000..2e2e3d9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vhcaddq_rot270_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhcadd.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vhcaddq_rot270 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhcadd.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s8.c
new file mode 100644
index 0000000..6788943
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vhcaddq_rot270_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhcadd.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vhcaddq_rot270 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhcadd.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s16.c
new file mode 100644
index 0000000..637b2cf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vhcaddq_rot90_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhcadd.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vhcaddq_rot90 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhcadd.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s32.c
new file mode 100644
index 0000000..52358be
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vhcaddq_rot90_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhcadd.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vhcaddq_rot90 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhcadd.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s8.c
new file mode 100644
index 0000000..5db1e54
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vhcaddq_rot90_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhcadd.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vhcaddq_rot90 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhcadd.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s16.c
new file mode 100644
index 0000000..723c27a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16_t b)
+{
+ return vhsubq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16_t b)
+{
+ return vhsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s32.c
new file mode 100644
index 0000000..2d2b13d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b)
+{
+ return vhsubq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vhsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s8.c
new file mode 100644
index 0000000..4180563
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8_t b)
+{
+ return vhsubq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8_t b)
+{
+ return vhsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u16.c
new file mode 100644
index 0000000..93e1395
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16_t b)
+{
+ return vhsubq_n_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16_t b)
+{
+ return vhsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u32.c
new file mode 100644
index 0000000..06dddd4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32_t b)
+{
+ return vhsubq_n_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32_t b)
+{
+ return vhsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u8.c
new file mode 100644
index 0000000..ea81c02
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8_t b)
+{
+ return vhsubq_n_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8_t b)
+{
+ return vhsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s16.c
new file mode 100644
index 0000000..89e6163
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vhsubq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vhsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s32.c
new file mode 100644
index 0000000..aac6752
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vhsubq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vhsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s8.c
new file mode 100644
index 0000000..e7008e4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vhsubq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vhsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u16.c
new file mode 100644
index 0000000..c2a5e5b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vhsubq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vhsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u32.c
new file mode 100644
index 0000000..634e2cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vhsubq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vhsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u8.c
new file mode 100644
index 0000000..9a95fb6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vhsubq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vhsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vhsub.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s16.c
new file mode 100644
index 0000000..36a6626
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, int16x8_t b)
+{
+ return vmaxaq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxa.s16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, int16x8_t b)
+{
+ return vmaxaq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxa.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s32.c
new file mode 100644
index 0000000..935f848
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, int32x4_t b)
+{
+ return vmaxaq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxa.s32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, int32x4_t b)
+{
+ return vmaxaq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxa.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s8.c
new file mode 100644
index 0000000..29a9297
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxaq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, int8x16_t b)
+{
+ return vmaxaq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxa.s8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, int8x16_t b)
+{
+ return vmaxaq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxa.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s16.c
new file mode 100644
index 0000000..acbf404
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16_t
+foo (uint16_t a, int16x8_t b)
+{
+ return vmaxavq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxav.s16" } } */
+
+uint16_t
+foo1 (uint16_t a, int16x8_t b)
+{
+ return vmaxavq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxav.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s32.c
new file mode 100644
index 0000000..a98318b8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint32_t a, int32x4_t b)
+{
+ return vmaxavq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxav.s32" } } */
+
+uint32_t
+foo1 (uint32_t a, int32x4_t b)
+{
+ return vmaxavq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxav.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s8.c
new file mode 100644
index 0000000..7007be2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxavq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8_t
+foo (uint8_t a, int8x16_t b)
+{
+ return vmaxavq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxav.s8" } } */
+
+uint8_t
+foo1 (uint8_t a, int8x16_t b)
+{
+ return vmaxavq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxav.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s16.c
new file mode 100644
index 0000000..8e25328
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vmaxq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmax.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vmaxq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmax.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s32.c
new file mode 100644
index 0000000..732f68b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vmaxq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmax.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vmaxq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmax.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s8.c
new file mode 100644
index 0000000..824b48d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vmaxq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmax.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vmaxq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmax.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u16.c
new file mode 100644
index 0000000..07cee0b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vmaxq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmax.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vmaxq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmax.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u32.c
new file mode 100644
index 0000000..d12df30
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vmaxq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmax.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vmaxq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmax.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u8.c
new file mode 100644
index 0000000..b78489c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vmaxq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmax.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vmaxq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmax.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s16.c
new file mode 100644
index 0000000..e529868
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16_t
+foo (int16_t a, int16x8_t b)
+{
+ return vmaxvq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxv.s16" } } */
+
+int16_t
+foo1 (int16_t a, int16x8_t b)
+{
+ return vmaxvq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxv.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s32.c
new file mode 100644
index 0000000..d4413e8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int32_t a, int32x4_t b)
+{
+ return vmaxvq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxv.s32" } } */
+
+int32_t
+foo1 (int32_t a, int32x4_t b)
+{
+ return vmaxvq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxv.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s8.c
new file mode 100644
index 0000000..df0a452
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8_t
+foo (int8_t a, int8x16_t b)
+{
+ return vmaxvq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxv.s8" } } */
+
+int8_t
+foo1 (int8_t a, int8x16_t b)
+{
+ return vmaxvq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxv.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u16.c
new file mode 100644
index 0000000..9936b28
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16_t
+foo (uint16_t a, uint16x8_t b)
+{
+ return vmaxvq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxv.u16" } } */
+
+uint16_t
+foo1 (uint16_t a, uint16x8_t b)
+{
+ return vmaxvq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxv.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u32.c
new file mode 100644
index 0000000..3a93d1f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint32_t a, uint32x4_t b)
+{
+ return vmaxvq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxv.u32" } } */
+
+uint32_t
+foo1 (uint32_t a, uint32x4_t b)
+{
+ return vmaxvq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxv.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u8.c
new file mode 100644
index 0000000..208e2a3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxvq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8_t
+foo (uint8_t a, uint8x16_t b)
+{
+ return vmaxvq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxv.u8" } } */
+
+uint8_t
+foo1 (uint8_t a, uint8x16_t b)
+{
+ return vmaxvq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmaxv.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s16.c
new file mode 100644
index 0000000..d63314e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, int16x8_t b)
+{
+ return vminaq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmina.s16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, int16x8_t b)
+{
+ return vminaq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmina.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s32.c
new file mode 100644
index 0000000..9c0da84
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, int32x4_t b)
+{
+ return vminaq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmina.s32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, int32x4_t b)
+{
+ return vminaq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmina.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s8.c
new file mode 100644
index 0000000..7754688
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminaq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, int8x16_t b)
+{
+ return vminaq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmina.s8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, int8x16_t b)
+{
+ return vminaq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmina.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s16.c
new file mode 100644
index 0000000..3fde0f6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16_t
+foo (uint16_t a, int16x8_t b)
+{
+ return vminavq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vminav.s16" } } */
+
+uint16_t
+foo1 (uint16_t a, int16x8_t b)
+{
+ return vminavq (a, b);
+}
+
+/* { dg-final { scan-assembler "vminav.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s32.c
new file mode 100644
index 0000000..d11604f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint32_t a, int32x4_t b)
+{
+ return vminavq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vminav.s32" } } */
+
+uint32_t
+foo1 (uint32_t a, int32x4_t b)
+{
+ return vminavq (a, b);
+}
+
+/* { dg-final { scan-assembler "vminav.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s8.c
new file mode 100644
index 0000000..be4485d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminavq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8_t
+foo (uint8_t a, int8x16_t b)
+{
+ return vminavq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vminav.s8" } } */
+
+uint8_t
+foo1 (uint8_t a, int8x16_t b)
+{
+ return vminavq (a, b);
+}
+
+/* { dg-final { scan-assembler "vminav.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s16.c
new file mode 100644
index 0000000..c3a6d1b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vminq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmin.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vminq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmin.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s32.c
new file mode 100644
index 0000000..9f53f04
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vminq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmin.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vminq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmin.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s8.c
new file mode 100644
index 0000000..7b0a077
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vminq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmin.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vminq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmin.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u16.c
new file mode 100644
index 0000000..82ace41
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vminq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmin.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vminq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmin.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u32.c
new file mode 100644
index 0000000..7649470
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vminq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmin.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vminq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmin.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u8.c
new file mode 100644
index 0000000..e2e27c9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vminq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmin.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vminq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmin.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s16.c
new file mode 100644
index 0000000..3c3ccd4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16_t
+foo (int16_t a, int16x8_t b)
+{
+ return vminvq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vminv.s16" } } */
+
+int16_t
+foo1 (int16_t a, int16x8_t b)
+{
+ return vminvq (a, b);
+}
+
+/* { dg-final { scan-assembler "vminv.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s32.c
new file mode 100644
index 0000000..0d32820
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int32_t a, int32x4_t b)
+{
+ return vminvq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vminv.s32" } } */
+
+int32_t
+foo1 (int32_t a, int32x4_t b)
+{
+ return vminvq (a, b);
+}
+
+/* { dg-final { scan-assembler "vminv.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s8.c
new file mode 100644
index 0000000..bad7e3f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8_t
+foo (int8_t a, int8x16_t b)
+{
+ return vminvq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vminv.s8" } } */
+
+int8_t
+foo1 (int8_t a, int8x16_t b)
+{
+ return vminvq (a, b);
+}
+
+/* { dg-final { scan-assembler "vminv.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u16.c
new file mode 100644
index 0000000..bae99af
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16_t
+foo (uint16_t a, uint16x8_t b)
+{
+ return vminvq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vminv.u16" } } */
+
+uint16_t
+foo1 (uint16_t a, uint16x8_t b)
+{
+ return vminvq (a, b);
+}
+
+/* { dg-final { scan-assembler "vminv.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u32.c
new file mode 100644
index 0000000..b706203
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint32_t a, uint32x4_t b)
+{
+ return vminvq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vminv.u32" } } */
+
+uint32_t
+foo1 (uint32_t a, uint32x4_t b)
+{
+ return vminvq (a, b);
+}
+
+/* { dg-final { scan-assembler "vminv.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u8.c
new file mode 100644
index 0000000..f25d324
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminvq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8_t
+foo (uint8_t a, uint8x16_t b)
+{
+ return vminvq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vminv.u8" } } */
+
+uint8_t
+foo1 (uint8_t a, uint8x16_t b)
+{
+ return vminvq (a, b);
+}
+
+/* { dg-final { scan-assembler "vminv.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s16.c
new file mode 100644
index 0000000..9e7a8ff
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vmladavq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladav.s16" } } */
+
+int32_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vmladavq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladav.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s32.c
new file mode 100644
index 0000000..a7b0c5b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vmladavq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladav.s32" } } */
+
+int32_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vmladavq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladav.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s8.c
new file mode 100644
index 0000000..17ccc3c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vmladavq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladav.s8" } } */
+
+int32_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vmladavq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladav.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u16.c
new file mode 100644
index 0000000..cf7e011
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vmladavq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladav.u16" } } */
+
+uint32_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vmladavq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladav.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u32.c
new file mode 100644
index 0000000..0fd673f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vmladavq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladav.u32" } } */
+
+uint32_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vmladavq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladav.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u8.c
new file mode 100644
index 0000000..267d8ed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vmladavq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladav.u8" } } */
+
+uint32_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vmladavq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladav.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s16.c
new file mode 100644
index 0000000..26c4978
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vmladavxq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladavx.s16" } } */
+
+int32_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vmladavxq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladavx.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s32.c
new file mode 100644
index 0000000..c27689e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vmladavxq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladavx.s32" } } */
+
+int32_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vmladavxq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladavx.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s8.c
new file mode 100644
index 0000000..c586530
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmladavxq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vmladavxq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladavx.s8" } } */
+
+int32_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vmladavxq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmladavx.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s16.c
new file mode 100644
index 0000000..17712c7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vmlsdavq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmlsdav.s16" } } */
+
+int32_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vmlsdavq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmlsdav.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s32.c
new file mode 100644
index 0000000..e8d3797
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vmlsdavq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmlsdav.s32" } } */
+
+int32_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vmlsdavq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmlsdav.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s8.c
new file mode 100644
index 0000000..6d42b89
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vmlsdavq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmlsdav.s8" } } */
+
+int32_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vmlsdavq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmlsdav.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s16.c
new file mode 100644
index 0000000..8fa2995
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vmlsdavxq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmlsdavx.s16" } } */
+
+int32_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vmlsdavxq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmlsdavx.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s32.c
new file mode 100644
index 0000000..d284075
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vmlsdavxq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmlsdavx.s32" } } */
+
+int32_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vmlsdavxq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmlsdavx.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s8.c
new file mode 100644
index 0000000..8dcaa94
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmlsdavxq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vmlsdavxq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmlsdavx.s8" } } */
+
+int32_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vmlsdavxq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmlsdavx.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s16.c
new file mode 100644
index 0000000..4fd4d09
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vmulhq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmulh.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmulh.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s32.c
new file mode 100644
index 0000000..636e90e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vmulhq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmulh.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmulh.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s8.c
new file mode 100644
index 0000000..b894e6e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vmulhq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmulh.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmulh.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u16.c
new file mode 100644
index 0000000..d985f55
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vmulhq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmulh.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmulh.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u32.c
new file mode 100644
index 0000000..ab81b7c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vmulhq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmulh.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmulh.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u8.c
new file mode 100644
index 0000000..b193f69
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vmulhq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmulh.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmulh.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s16.c
new file mode 100644
index 0000000..f08143c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vmullbq_int_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullb.s16" } } */
+
+int32x4_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vmullbq_int (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullb.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s32.c
new file mode 100644
index 0000000..be3e424
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int64x2_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vmullbq_int_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullb.s32" } } */
+
+int64x2_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vmullbq_int (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullb.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s8.c
new file mode 100644
index 0000000..3c9f38c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vmullbq_int_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullb.s8" } } */
+
+int16x8_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vmullbq_int (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullb.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u16.c
new file mode 100644
index 0000000..1af9ae7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vmullbq_int_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullb.u16" } } */
+
+uint32x4_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vmullbq_int (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullb.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u32.c
new file mode 100644
index 0000000..938d000
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint64x2_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vmullbq_int_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullb.u32" } } */
+
+uint64x2_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vmullbq_int (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullb.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u8.c
new file mode 100644
index 0000000..5ea19ee
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vmullbq_int_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullb.u8" } } */
+
+uint16x8_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vmullbq_int (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullb.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s16.c
new file mode 100644
index 0000000..19033a8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vmulltq_int_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullt.s16" } } */
+
+int32x4_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vmulltq_int (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullt.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s32.c
new file mode 100644
index 0000000..f7d6fa6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int64x2_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vmulltq_int_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullt.s32" } } */
+
+int64x2_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vmulltq_int (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullt.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s8.c
new file mode 100644
index 0000000..dab3ef1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vmulltq_int_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullt.s8" } } */
+
+int16x8_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vmulltq_int (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullt.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u16.c
new file mode 100644
index 0000000..1f0d587
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vmulltq_int_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullt.u16" } } */
+
+uint32x4_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vmulltq_int (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullt.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u32.c
new file mode 100644
index 0000000..71fa3be
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint64x2_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vmulltq_int_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullt.u32" } } */
+
+uint64x2_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vmulltq_int (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullt.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u8.c
new file mode 100644
index 0000000..8ca4abe
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vmulltq_int_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullt.u8" } } */
+
+uint16x8_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vmulltq_int (a, b);
+}
+
+/* { dg-final { scan-assembler "vmullt.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s16.c
new file mode 100644
index 0000000..b83d7fe
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16_t b)
+{
+ return vmulq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16_t b)
+{
+ return vmulq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s32.c
new file mode 100644
index 0000000..5324c16
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b)
+{
+ return vmulq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vmulq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s8.c
new file mode 100644
index 0000000..b0c6a31
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8_t b)
+{
+ return vmulq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8_t b)
+{
+ return vmulq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u16.c
new file mode 100644
index 0000000..8b4848d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16_t b)
+{
+ return vmulq_n_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16_t b)
+{
+ return vmulq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u32.c
new file mode 100644
index 0000000..ff1754c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32_t b)
+{
+ return vmulq_n_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32_t b)
+{
+ return vmulq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u8.c
new file mode 100644
index 0000000..f68b208
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8_t b)
+{
+ return vmulq_n_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8_t b)
+{
+ return vmulq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s16.c
new file mode 100644
index 0000000..6fd2651
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vmulq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vmulq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s32.c
new file mode 100644
index 0000000..e286ed5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vmulq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vmulq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s8.c
new file mode 100644
index 0000000..7c87b3a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vmulq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vmulq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u16.c
new file mode 100644
index 0000000..1cdbbd5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vmulq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vmulq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u32.c
new file mode 100644
index 0000000..d042781
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vmulq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vmulq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u8.c
new file mode 100644
index 0000000..33df8f1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vmulq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vmulq (a, b);
+}
+
+/* { dg-final { scan-assembler "vmul.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s16.c
new file mode 100644
index 0000000..c291a8c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vornq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vorn" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vornq (a, b);
+}
+
+/* { dg-final { scan-assembler "vorn" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s32.c
new file mode 100644
index 0000000..066958d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vornq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vorn" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vornq (a, b);
+}
+
+/* { dg-final { scan-assembler "vorn" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s8.c
new file mode 100644
index 0000000..4a773a3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vornq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vorn" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vornq (a, b);
+}
+
+/* { dg-final { scan-assembler "vorn" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u16.c
new file mode 100644
index 0000000..ebcf3dd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vornq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vorn" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vornq (a, b);
+}
+
+/* { dg-final { scan-assembler "vorn" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u32.c
new file mode 100644
index 0000000..742b7ae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vornq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vorn" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vornq (a, b);
+}
+
+/* { dg-final { scan-assembler "vorn" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u8.c
new file mode 100644
index 0000000..ccd699a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vornq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vorn" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vornq (a, b);
+}
+
+/* { dg-final { scan-assembler "vorn" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s16.c
new file mode 100644
index 0000000..60b2700
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vorrq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vorr" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vorrq (a, b);
+}
+
+/* { dg-final { scan-assembler "vorr" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s32.c
new file mode 100644
index 0000000..d8053ce
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vorrq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vorr" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vorrq (a, b);
+}
+
+/* { dg-final { scan-assembler "vorr" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s8.c
new file mode 100644
index 0000000..4e7a69a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vorrq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vorr" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vorrq (a, b);
+}
+
+/* { dg-final { scan-assembler "vorr" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u16.c
new file mode 100644
index 0000000..73225bf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vorrq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vorr" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vorrq (a, b);
+}
+
+/* { dg-final { scan-assembler "vorr" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u32.c
new file mode 100644
index 0000000..98753e6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vorrq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vorr" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vorrq (a, b);
+}
+
+/* { dg-final { scan-assembler "vorr" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u8.c
new file mode 100644
index 0000000..e2e1cf9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vorrq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vorr" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vorrq (a, b);
+}
+
+/* { dg-final { scan-assembler "vorr" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s16.c
new file mode 100644
index 0000000..1bf77bc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16_t b)
+{
+ return vqaddq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16_t b)
+{
+ return vqaddq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s32.c
new file mode 100644
index 0000000..8dd6542
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b)
+{
+ return vqaddq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vqaddq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s8.c
new file mode 100644
index 0000000..67cc654
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8_t b)
+{
+ return vqaddq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8_t b)
+{
+ return vqaddq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u16.c
new file mode 100644
index 0000000..0ab0065
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16_t b)
+{
+ return vqaddq_n_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16_t b)
+{
+ return vqaddq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u32.c
new file mode 100644
index 0000000..35ce4eb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32_t b)
+{
+ return vqaddq_n_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32_t b)
+{
+ return vqaddq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u8.c
new file mode 100644
index 0000000..f747a7c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8_t b)
+{
+ return vqaddq_n_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8_t b)
+{
+ return vqaddq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s16.c
new file mode 100644
index 0000000..64efd37
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vqaddq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vqaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s32.c
new file mode 100644
index 0000000..bd1c1bd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vqaddq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vqaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s8.c
new file mode 100644
index 0000000..9e5e4c5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vqaddq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vqaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u16.c
new file mode 100644
index 0000000..3ee5ff1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vqaddq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vqaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u32.c
new file mode 100644
index 0000000..571433f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vqaddq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vqaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u8.c
new file mode 100644
index 0000000..cba127e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqaddq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vqaddq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vqaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqadd.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s16.c
new file mode 100644
index 0000000..e5adac2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16_t b)
+{
+ return vqdmulhq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqdmulh.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16_t b)
+{
+ return vqdmulhq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqdmulh.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s32.c
new file mode 100644
index 0000000..965bbd8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b)
+{
+ return vqdmulhq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqdmulh.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vqdmulhq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqdmulh.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s8.c
new file mode 100644
index 0000000..60d3517
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8_t b)
+{
+ return vqdmulhq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqdmulh.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8_t b)
+{
+ return vqdmulhq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqdmulh.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s16.c
new file mode 100644
index 0000000..1a24bfd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vqdmulhq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqdmulh.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vqdmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqdmulh.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s32.c
new file mode 100644
index 0000000..83b74af
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vqdmulhq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqdmulh.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vqdmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqdmulh.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s8.c
new file mode 100644
index 0000000..62170b0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqdmulhq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vqdmulhq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqdmulh.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vqdmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqdmulh.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s16.c
new file mode 100644
index 0000000..fdeb2c8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16_t b)
+{
+ return vqrdmulhq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrdmulh.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16_t b)
+{
+ return vqrdmulhq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrdmulh.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s32.c
new file mode 100644
index 0000000..825ef08
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b)
+{
+ return vqrdmulhq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrdmulh.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vqrdmulhq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrdmulh.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s8.c
new file mode 100644
index 0000000..43c2932
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8_t b)
+{
+ return vqrdmulhq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrdmulh.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8_t b)
+{
+ return vqrdmulhq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrdmulh.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s16.c
new file mode 100644
index 0000000..7a7d5e2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vqrdmulhq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrdmulh.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vqrdmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrdmulh.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s32.c
new file mode 100644
index 0000000..16a2001
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vqrdmulhq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrdmulh.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vqrdmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrdmulh.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s8.c
new file mode 100644
index 0000000..3350abd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrdmulhq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vqrdmulhq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrdmulh.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vqrdmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrdmulh.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s16.c
new file mode 100644
index 0000000..c4f7a2d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int32_t b)
+{
+ return vqrshlq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int32_t b)
+{
+ return vqrshlq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s32.c
new file mode 100644
index 0000000..8478efa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b)
+{
+ return vqrshlq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vqrshlq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s8.c
new file mode 100644
index 0000000..af40991
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int32_t b)
+{
+ return vqrshlq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int32_t b)
+{
+ return vqrshlq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u16.c
new file mode 100644
index 0000000..20fd1bc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, int32_t b)
+{
+ return vqrshlq_n_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, int32_t b)
+{
+ return vqrshlq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u32.c
new file mode 100644
index 0000000..e235c18
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, int32_t b)
+{
+ return vqrshlq_n_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, int32_t b)
+{
+ return vqrshlq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u8.c
new file mode 100644
index 0000000..ecd4a5a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, int32_t b)
+{
+ return vqrshlq_n_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, int32_t b)
+{
+ return vqrshlq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s16.c
new file mode 100644
index 0000000..03e9bbe
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vqrshlq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vqrshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s32.c
new file mode 100644
index 0000000..6f0cf54
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vqrshlq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vqrshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s8.c
new file mode 100644
index 0000000..451607e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vqrshlq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vqrshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u16.c
new file mode 100644
index 0000000..f59823f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, int16x8_t b)
+{
+ return vqrshlq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, int16x8_t b)
+{
+ return vqrshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u32.c
new file mode 100644
index 0000000..d3744db
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, int32x4_t b)
+{
+ return vqrshlq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, int32x4_t b)
+{
+ return vqrshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u8.c
new file mode 100644
index 0000000..a3e8da3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqrshlq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, int8x16_t b)
+{
+ return vqrshlq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, int8x16_t b)
+{
+ return vqrshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqrshl.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s16.c
new file mode 100644
index 0000000..0880cb3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a)
+{
+ return vqshlq_n_s16 (a, 1);
+}
+
+/* { dg-final { scan-assembler "vqshl.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a)
+{
+ return vqshlq_n (a, 1);
+}
+
+/* { dg-final { scan-assembler "vqshl.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s32.c
new file mode 100644
index 0000000..3a56f35
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a)
+{
+ return vqshlq_n_s32 (a, 1);
+}
+
+/* { dg-final { scan-assembler "vqshl.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a)
+{
+ return vqshlq_n (a, 1);
+}
+
+/* { dg-final { scan-assembler "vqshl.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s8.c
new file mode 100644
index 0000000..28f384c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a)
+{
+ return vqshlq_n_s8 (a, 1);
+}
+
+/* { dg-final { scan-assembler "vqshl.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a)
+{
+ return vqshlq_n (a, 1);
+}
+
+/* { dg-final { scan-assembler "vqshl.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u16.c
new file mode 100644
index 0000000..8beb722
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a)
+{
+ return vqshlq_n_u16 (a, 1);
+}
+
+/* { dg-final { scan-assembler "vqshl.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a)
+{
+ return vqshlq_n (a, 1);
+}
+
+/* { dg-final { scan-assembler "vqshl.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u32.c
new file mode 100644
index 0000000..ef616dc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a)
+{
+ return vqshlq_n_u32 (a, 1);
+}
+
+/* { dg-final { scan-assembler "vqshl.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a)
+{
+ return vqshlq_n (a, 1);
+}
+
+/* { dg-final { scan-assembler "vqshl.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u8.c
new file mode 100644
index 0000000..a85ef65
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a)
+{
+ return vqshlq_n_u8 (a, 1);
+}
+
+/* { dg-final { scan-assembler "vqshl.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a)
+{
+ return vqshlq_n (a, 1);
+}
+
+/* { dg-final { scan-assembler "vqshl.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s16.c
new file mode 100644
index 0000000..46a02f1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int32_t b)
+{
+ return vqshlq_r_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int32_t b)
+{
+ return vqshlq_r (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s32.c
new file mode 100644
index 0000000..e53f613
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b)
+{
+ return vqshlq_r_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vqshlq_r (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s8.c
new file mode 100644
index 0000000..1cbd5d6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int32_t b)
+{
+ return vqshlq_r_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int32_t b)
+{
+ return vqshlq_r (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u16.c
new file mode 100644
index 0000000..3503245
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, int32_t b)
+{
+ return vqshlq_r_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, int32_t b)
+{
+ return vqshlq_r (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u32.c
new file mode 100644
index 0000000..9d3dbcd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, int32_t b)
+{
+ return vqshlq_r_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, int32_t b)
+{
+ return vqshlq_r (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u8.c
new file mode 100644
index 0000000..4466e9e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_r_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, int32_t b)
+{
+ return vqshlq_r_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, int32_t b)
+{
+ return vqshlq_r (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s16.c
new file mode 100644
index 0000000..867e7e2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vqshlq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vqshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s32.c
new file mode 100644
index 0000000..121debf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vqshlq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vqshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s8.c
new file mode 100644
index 0000000..cfcb38f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vqshlq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vqshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u16.c
new file mode 100644
index 0000000..43b53d0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, int16x8_t b)
+{
+ return vqshlq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, int16x8_t b)
+{
+ return vqshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u32.c
new file mode 100644
index 0000000..af63eeb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, int32x4_t b)
+{
+ return vqshlq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, int32x4_t b)
+{
+ return vqshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u8.c
new file mode 100644
index 0000000..691e7be
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshlq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, int8x16_t b)
+{
+ return vqshlq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, int8x16_t b)
+{
+ return vqshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqshl.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s16.c
new file mode 100644
index 0000000..79418d9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (int16x8_t a)
+{
+ return vqshluq_n_s16 (a, 7);
+}
+
+/* { dg-final { scan-assembler "vqshlu.s16" } } */
+
+uint16x8_t
+foo1 (int16x8_t a)
+{
+ return vqshluq_n (a, 7);
+}
+
+/* { dg-final { scan-assembler "vqshlu.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s32.c
new file mode 100644
index 0000000..10e8fa4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (int32x4_t a)
+{
+ return vqshluq_n_s32 (a, 7);
+}
+
+/* { dg-final { scan-assembler "vqshlu.s32" } } */
+
+uint32x4_t
+foo1 (int32x4_t a)
+{
+ return vqshluq_n (a, 7);
+}
+
+/* { dg-final { scan-assembler "vqshlu.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s8.c
new file mode 100644
index 0000000..920b1b7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqshluq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (int8x16_t a)
+{
+ return vqshluq_n_s8 (a, 7);
+}
+
+/* { dg-final { scan-assembler "vqshlu.s8" } } */
+
+uint8x16_t
+foo1 (int8x16_t a)
+{
+ return vqshluq_n (a, 7);
+}
+
+/* { dg-final { scan-assembler "vqshlu.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s16.c
new file mode 100644
index 0000000..0da54b6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16_t b)
+{
+ return vqsubq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16_t b)
+{
+ return vqsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s32.c
new file mode 100644
index 0000000..5db6fee
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b)
+{
+ return vqsubq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vqsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s8.c
new file mode 100644
index 0000000..96519ae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8_t b)
+{
+ return vqsubq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8_t b)
+{
+ return vqsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u16.c
new file mode 100644
index 0000000..c735629
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16_t b)
+{
+ return vqsubq_n_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16_t b)
+{
+ return vqsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u32.c
new file mode 100644
index 0000000..a196593a2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32_t b)
+{
+ return vqsubq_n_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32_t b)
+{
+ return vqsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u8.c
new file mode 100644
index 0000000..981d623
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8_t b)
+{
+ return vqsubq_n_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8_t b)
+{
+ return vqsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s16.c
new file mode 100644
index 0000000..26be587
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vqsubq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vqsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s32.c
new file mode 100644
index 0000000..9739a15
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vqsubq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vqsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s8.c
new file mode 100644
index 0000000..1bdf354
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vqsubq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vqsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u16.c
new file mode 100644
index 0000000..1f29987
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vqsubq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vqsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u32.c
new file mode 100644
index 0000000..6ff6121
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vqsubq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vqsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u8.c
new file mode 100644
index 0000000..daa3e87
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vqsubq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vqsubq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vqsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vqsub.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s16.c
new file mode 100644
index 0000000..5bb7897
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vrhaddq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrhadd.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vrhaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrhadd.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s32.c
new file mode 100644
index 0000000..33a145c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vrhaddq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrhadd.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vrhaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrhadd.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s8.c
new file mode 100644
index 0000000..9e7fe95f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vrhaddq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrhadd.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vrhaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrhadd.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u16.c
new file mode 100644
index 0000000..ff0b5a7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vrhaddq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrhadd.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vrhaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrhadd.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u32.c
new file mode 100644
index 0000000..ba16e80
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vrhaddq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrhadd.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vrhaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrhadd.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u8.c
new file mode 100644
index 0000000..a3fadd4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vrhaddq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrhadd.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vrhaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrhadd.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s16.c
new file mode 100644
index 0000000..44b3885
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vrmulhq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrmulh.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vrmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrmulh.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s32.c
new file mode 100644
index 0000000..174a60a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vrmulhq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrmulh.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vrmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrmulh.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s8.c
new file mode 100644
index 0000000..89dbd6a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vrmulhq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrmulh.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vrmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrmulh.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u16.c
new file mode 100644
index 0000000..4fb76f9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vrmulhq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrmulh.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vrmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrmulh.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u32.c
new file mode 100644
index 0000000..7b52833
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vrmulhq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrmulh.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vrmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrmulh.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u8.c
new file mode 100644
index 0000000..a151828
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vrmulhq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrmulh.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vrmulhq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrmulh.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s16.c
new file mode 100644
index 0000000..bd380f8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int32_t b)
+{
+ return vrshlq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int32_t b)
+{
+ return vrshlq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s32.c
new file mode 100644
index 0000000..9f7051d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b)
+{
+ return vrshlq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vrshlq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s8.c
new file mode 100644
index 0000000..04e3321
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int32_t b)
+{
+ return vrshlq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int32_t b)
+{
+ return vrshlq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u16.c
new file mode 100644
index 0000000..fc3c87d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, int32_t b)
+{
+ return vrshlq_n_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, int32_t b)
+{
+ return vrshlq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u32.c
new file mode 100644
index 0000000..937f145
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, int32_t b)
+{
+ return vrshlq_n_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, int32_t b)
+{
+ return vrshlq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u8.c
new file mode 100644
index 0000000..68c967f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, int32_t b)
+{
+ return vrshlq_n_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, int32_t b)
+{
+ return vrshlq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s16.c
new file mode 100644
index 0000000..9cc6bce
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vrshlq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vrshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s32.c
new file mode 100644
index 0000000..cbc68db
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vrshlq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vrshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s8.c
new file mode 100644
index 0000000..08fbc16
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vrshlq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vrshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u16.c
new file mode 100644
index 0000000..db530b2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, int16x8_t b)
+{
+ return vrshlq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, int16x8_t b)
+{
+ return vrshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u32.c
new file mode 100644
index 0000000..ecb5ea2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, int32x4_t b)
+{
+ return vrshlq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, int32x4_t b)
+{
+ return vrshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u8.c
new file mode 100644
index 0000000..09ffca0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, int8x16_t b)
+{
+ return vrshlq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, int8x16_t b)
+{
+ return vrshlq (a, b);
+}
+
+/* { dg-final { scan-assembler "vrshl.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s16.c
new file mode 100644
index 0000000..04147e2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a)
+{
+ return vrshrq_n_s16 (a, 16);
+}
+
+/* { dg-final { scan-assembler "vrshr.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a)
+{
+ return vrshrq_n (a, 16);
+}
+
+/* { dg-final { scan-assembler "vrshr.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s32.c
new file mode 100644
index 0000000..4c68cf8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a)
+{
+ return vrshrq_n_s32 (a, 32);
+}
+
+/* { dg-final { scan-assembler "vrshr.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a)
+{
+ return vrshrq_n (a, 32);
+}
+
+/* { dg-final { scan-assembler "vrshr.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s8.c
new file mode 100644
index 0000000..5263272
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a)
+{
+ return vrshrq_n_s8 (a, 8);
+}
+
+/* { dg-final { scan-assembler "vrshr.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a)
+{
+ return vrshrq_n (a, 8);
+}
+
+/* { dg-final { scan-assembler "vrshr.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u16.c
new file mode 100644
index 0000000..5952a48
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a)
+{
+ return vrshrq_n_u16 (a, 16);
+}
+
+/* { dg-final { scan-assembler "vrshr.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a)
+{
+ return vrshrq_n (a, 16);
+}
+
+/* { dg-final { scan-assembler "vrshr.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u32.c
new file mode 100644
index 0000000..507f2dd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a)
+{
+ return vrshrq_n_u32 (a, 32);
+}
+
+/* { dg-final { scan-assembler "vrshr.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a)
+{
+ return vrshrq_n (a, 32);
+}
+
+/* { dg-final { scan-assembler "vrshr.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u8.c
new file mode 100644
index 0000000..ec882da
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a)
+{
+ return vrshrq_n_u8 (a, 8);
+}
+
+/* { dg-final { scan-assembler "vrshr.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a)
+{
+ return vrshrq_n (a, 8);
+}
+
+/* { dg-final { scan-assembler "vrshr.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s16.c
new file mode 100644
index 0000000..12bde64
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a)
+{
+ return vshlq_n_s16 (a, 1);
+}
+
+/* { dg-final { scan-assembler "vshl.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a)
+{
+ return vshlq_n (a, 1);
+}
+
+/* { dg-final { scan-assembler "vshl.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s32.c
new file mode 100644
index 0000000..64b797b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a)
+{
+ return vshlq_n_s32 (a, 16);
+}
+
+/* { dg-final { scan-assembler "vshl.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a)
+{
+ return vshlq_n (a, 16);
+}
+
+/* { dg-final { scan-assembler "vshl.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s8.c
new file mode 100644
index 0000000..df495c0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a)
+{
+ return vshlq_n_s8 (a, 1);
+}
+
+/* { dg-final { scan-assembler "vshl.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a)
+{
+ return vshlq_n (a, 1);
+}
+
+/* { dg-final { scan-assembler "vshl.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u16.c
new file mode 100644
index 0000000..d2c1a72
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a)
+{
+ return vshlq_n_u16 (a, 11);
+}
+
+/* { dg-final { scan-assembler "vshl.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a)
+{
+ return vshlq_n (a, 11);
+}
+
+/* { dg-final { scan-assembler "vshl.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u32.c
new file mode 100644
index 0000000..17c4697
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a)
+{
+ return vshlq_n_u32 (a, 1);
+}
+
+/* { dg-final { scan-assembler "vshl.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a)
+{
+ return vshlq_n (a, 1);
+}
+
+/* { dg-final { scan-assembler "vshl.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u8.c
new file mode 100644
index 0000000..dbbfee1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a)
+{
+ return vshlq_n_u8 (a, 1);
+}
+
+/* { dg-final { scan-assembler "vshl.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a)
+{
+ return vshlq_n (a, 1);
+}
+
+/* { dg-final { scan-assembler "vshl.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s16.c
new file mode 100644
index 0000000..1a571da
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int32_t b)
+{
+ return vshlq_r_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vshl.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int32_t b)
+{
+ return vshlq_r (a, b);
+}
+
+/* { dg-final { scan-assembler "vshl.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s32.c
new file mode 100644
index 0000000..0402912
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b)
+{
+ return vshlq_r_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vshl.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vshlq_r (a, b);
+}
+
+/* { dg-final { scan-assembler "vshl.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s8.c
new file mode 100644
index 0000000..c75ab26
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int32_t b)
+{
+ return vshlq_r_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vshl.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int32_t b)
+{
+ return vshlq_r (a, b);
+}
+
+/* { dg-final { scan-assembler "vshl.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u16.c
new file mode 100644
index 0000000..7f1f83c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, int32_t b)
+{
+ return vshlq_r_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vshl.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, int32_t b)
+{
+ return vshlq_r (a, b);
+}
+
+/* { dg-final { scan-assembler "vshl.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u32.c
new file mode 100644
index 0000000..bfed41d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, int32_t b)
+{
+ return vshlq_r_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vshl.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, int32_t b)
+{
+ return vshlq_r (a, b);
+}
+
+/* { dg-final { scan-assembler "vshl.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u8.c
new file mode 100644
index 0000000..add49f5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_r_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, int32_t b)
+{
+ return vshlq_r_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vshl.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, int32_t b)
+{
+ return vshlq_r (a, b);
+}
+
+/* { dg-final { scan-assembler "vshl.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s16.c
new file mode 100644
index 0000000..49153b2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16_t b)
+{
+ return vsubq_n_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16_t b)
+{
+ return vsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s32.c
new file mode 100644
index 0000000..c337a48
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b)
+{
+ return vsubq_n_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b)
+{
+ return vsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s8.c
new file mode 100644
index 0000000..f3652b1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8_t b)
+{
+ return vsubq_n_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8_t b)
+{
+ return vsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u16.c
new file mode 100644
index 0000000..c2f67d6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16_t b)
+{
+ return vsubq_n_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16_t b)
+{
+ return vsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u32.c
new file mode 100644
index 0000000..c5b5975
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32_t b)
+{
+ return vsubq_n_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32_t b)
+{
+ return vsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u8.c
new file mode 100644
index 0000000..8088ab1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_n_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8_t b)
+{
+ return vsubq_n_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8_t b)
+{
+ return vsubq_n (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s16.c
new file mode 100644
index 0000000..2b98e4f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vsubq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s32.c
new file mode 100644
index 0000000..847d56b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vsubq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s8.c
new file mode 100644
index 0000000..bbf40ce
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_s8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vsubq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u16.c
new file mode 100644
index 0000000..a57093c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u16.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vsubq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u32.c
new file mode 100644
index 0000000..964fbfc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u32.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vsubq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u8.c
new file mode 100644
index 0000000..f900ab1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_u8.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vsubq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vsubq (a, b);
+}
+
+/* { dg-final { scan-assembler "vsub.i8" } } */