aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSrinath Parvathaneni <srinath.parvathaneni@arm.com>2020-03-20 14:14:35 +0000
committerKyrylo Tkachov <kyrylo.tkachov@arm.com>2020-03-20 14:14:35 +0000
commit261014a1be433a27af75fb7eecc77231261d84f7 (patch)
treeaa62f5bb309e40d468bf48663dfcd0c5d89ecdaf
parent3d42842c07f4143042f3dcc39a050b262bcf1b55 (diff)
downloadgcc-261014a1be433a27af75fb7eecc77231261d84f7.zip
gcc-261014a1be433a27af75fb7eecc77231261d84f7.tar.gz
gcc-261014a1be433a27af75fb7eecc77231261d84f7.tar.bz2
[ARM][GCC][9x]: MVE ACLE predicated intrinsics with (don't-care) variant.
This patch supports following MVE ACLE predicated intrinsic with `_x` (dont-care) variant. * ``_x`` (dont-care) which indicates that the false-predicated lanes have undefined values. These are syntactic sugar for merge intrinsics with a ``vuninitializedq`` inactive parameter. vabdq_x_f16, vabdq_x_f32, vabdq_x_s16, vabdq_x_s32, vabdq_x_s8, vabdq_x_u16, vabdq_x_u32, vabdq_x_u8, vabsq_x_f16, vabsq_x_f32, vabsq_x_s16, vabsq_x_s32, vabsq_x_s8, vaddq_x_f16, vaddq_x_f32, vaddq_x_n_f16, vaddq_x_n_f32, vaddq_x_n_s16, vaddq_x_n_s32, vaddq_x_n_s8, vaddq_x_n_u16, vaddq_x_n_u32, vaddq_x_n_u8, vaddq_x_s16, vaddq_x_s32, vaddq_x_s8, vaddq_x_u16, vaddq_x_u32, vaddq_x_u8, vandq_x_f16, vandq_x_f32, vandq_x_s16, vandq_x_s32, vandq_x_s8, vandq_x_u16, vandq_x_u32, vandq_x_u8, vbicq_x_f16, vbicq_x_f32, vbicq_x_s16, vbicq_x_s32, vbicq_x_s8, vbicq_x_u16, vbicq_x_u32, vbicq_x_u8, vbrsrq_x_n_f16, vbrsrq_x_n_f32, vbrsrq_x_n_s16, vbrsrq_x_n_s32, vbrsrq_x_n_s8, vbrsrq_x_n_u16, vbrsrq_x_n_u32, vbrsrq_x_n_u8, vcaddq_rot270_x_f16, vcaddq_rot270_x_f32, vcaddq_rot270_x_s16, vcaddq_rot270_x_s32, vcaddq_rot270_x_s8, vcaddq_rot270_x_u16, vcaddq_rot270_x_u32, vcaddq_rot270_x_u8, vcaddq_rot90_x_f16, vcaddq_rot90_x_f32, vcaddq_rot90_x_s16, vcaddq_rot90_x_s32, vcaddq_rot90_x_s8, vcaddq_rot90_x_u16, vcaddq_rot90_x_u32, vcaddq_rot90_x_u8, vclsq_x_s16, vclsq_x_s32, vclsq_x_s8, vclzq_x_s16, vclzq_x_s32, vclzq_x_s8, vclzq_x_u16, vclzq_x_u32, vclzq_x_u8, vcmulq_rot180_x_f16, vcmulq_rot180_x_f32, vcmulq_rot270_x_f16, vcmulq_rot270_x_f32, vcmulq_rot90_x_f16, vcmulq_rot90_x_f32, vcmulq_x_f16, vcmulq_x_f32, vcvtaq_x_s16_f16, vcvtaq_x_s32_f32, vcvtaq_x_u16_f16, vcvtaq_x_u32_f32, vcvtbq_x_f32_f16, vcvtmq_x_s16_f16, vcvtmq_x_s32_f32, vcvtmq_x_u16_f16, vcvtmq_x_u32_f32, vcvtnq_x_s16_f16, vcvtnq_x_s32_f32, vcvtnq_x_u16_f16, vcvtnq_x_u32_f32, vcvtpq_x_s16_f16, vcvtpq_x_s32_f32, vcvtpq_x_u16_f16, vcvtpq_x_u32_f32, vcvtq_x_f16_s16, vcvtq_x_f16_u16, vcvtq_x_f32_s32, vcvtq_x_f32_u32, vcvtq_x_n_f16_s16, vcvtq_x_n_f16_u16, 
vcvtq_x_n_f32_s32, vcvtq_x_n_f32_u32, vcvtq_x_n_s16_f16, vcvtq_x_n_s32_f32, vcvtq_x_n_u16_f16, vcvtq_x_n_u32_f32, vcvtq_x_s16_f16, vcvtq_x_s32_f32, vcvtq_x_u16_f16, vcvtq_x_u32_f32, vcvttq_x_f32_f16, vddupq_x_n_u16, vddupq_x_n_u32, vddupq_x_n_u8, vddupq_x_wb_u16, vddupq_x_wb_u32, vddupq_x_wb_u8, vdupq_x_n_f16, vdupq_x_n_f32, vdupq_x_n_s16, vdupq_x_n_s32, vdupq_x_n_s8, vdupq_x_n_u16, vdupq_x_n_u32, vdupq_x_n_u8, vdwdupq_x_n_u16, vdwdupq_x_n_u32, vdwdupq_x_n_u8, vdwdupq_x_wb_u16, vdwdupq_x_wb_u32, vdwdupq_x_wb_u8, veorq_x_f16, veorq_x_f32, veorq_x_s16, veorq_x_s32, veorq_x_s8, veorq_x_u16, veorq_x_u32, veorq_x_u8, vhaddq_x_n_s16, vhaddq_x_n_s32, vhaddq_x_n_s8, vhaddq_x_n_u16, vhaddq_x_n_u32, vhaddq_x_n_u8, vhaddq_x_s16, vhaddq_x_s32, vhaddq_x_s8, vhaddq_x_u16, vhaddq_x_u32, vhaddq_x_u8, vhcaddq_rot270_x_s16, vhcaddq_rot270_x_s32, vhcaddq_rot270_x_s8, vhcaddq_rot90_x_s16, vhcaddq_rot90_x_s32, vhcaddq_rot90_x_s8, vhsubq_x_n_s16, vhsubq_x_n_s32, vhsubq_x_n_s8, vhsubq_x_n_u16, vhsubq_x_n_u32, vhsubq_x_n_u8, vhsubq_x_s16, vhsubq_x_s32, vhsubq_x_s8, vhsubq_x_u16, vhsubq_x_u32, vhsubq_x_u8, vidupq_x_n_u16, vidupq_x_n_u32, vidupq_x_n_u8, vidupq_x_wb_u16, vidupq_x_wb_u32, vidupq_x_wb_u8, viwdupq_x_n_u16, viwdupq_x_n_u32, viwdupq_x_n_u8, viwdupq_x_wb_u16, viwdupq_x_wb_u32, viwdupq_x_wb_u8, vmaxnmq_x_f16, vmaxnmq_x_f32, vmaxq_x_s16, vmaxq_x_s32, vmaxq_x_s8, vmaxq_x_u16, vmaxq_x_u32, vmaxq_x_u8, vminnmq_x_f16, vminnmq_x_f32, vminq_x_s16, vminq_x_s32, vminq_x_s8, vminq_x_u16, vminq_x_u32, vminq_x_u8, vmovlbq_x_s16, vmovlbq_x_s8, vmovlbq_x_u16, vmovlbq_x_u8, vmovltq_x_s16, vmovltq_x_s8, vmovltq_x_u16, vmovltq_x_u8, vmulhq_x_s16, vmulhq_x_s32, vmulhq_x_s8, vmulhq_x_u16, vmulhq_x_u32, vmulhq_x_u8, vmullbq_int_x_s16, vmullbq_int_x_s32, vmullbq_int_x_s8, vmullbq_int_x_u16, vmullbq_int_x_u32, vmullbq_int_x_u8, vmullbq_poly_x_p16, vmullbq_poly_x_p8, vmulltq_int_x_s16, vmulltq_int_x_s32, vmulltq_int_x_s8, vmulltq_int_x_u16, vmulltq_int_x_u32, vmulltq_int_x_u8, vmulltq_poly_x_p16, 
vmulltq_poly_x_p8, vmulq_x_f16, vmulq_x_f32, vmulq_x_n_f16, vmulq_x_n_f32, vmulq_x_n_s16, vmulq_x_n_s32, vmulq_x_n_s8, vmulq_x_n_u16, vmulq_x_n_u32, vmulq_x_n_u8, vmulq_x_s16, vmulq_x_s32, vmulq_x_s8, vmulq_x_u16, vmulq_x_u32, vmulq_x_u8, vmvnq_x_n_s16, vmvnq_x_n_s32, vmvnq_x_n_u16, vmvnq_x_n_u32, vmvnq_x_s16, vmvnq_x_s32, vmvnq_x_s8, vmvnq_x_u16, vmvnq_x_u32, vmvnq_x_u8, vnegq_x_f16, vnegq_x_f32, vnegq_x_s16, vnegq_x_s32, vnegq_x_s8, vornq_x_f16, vornq_x_f32, vornq_x_s16, vornq_x_s32, vornq_x_s8, vornq_x_u16, vornq_x_u32, vornq_x_u8, vorrq_x_f16, vorrq_x_f32, vorrq_x_s16, vorrq_x_s32, vorrq_x_s8, vorrq_x_u16, vorrq_x_u32, vorrq_x_u8, vrev16q_x_s8, vrev16q_x_u8, vrev32q_x_f16, vrev32q_x_s16, vrev32q_x_s8, vrev32q_x_u16, vrev32q_x_u8, vrev64q_x_f16, vrev64q_x_f32, vrev64q_x_s16, vrev64q_x_s32, vrev64q_x_s8, vrev64q_x_u16, vrev64q_x_u32, vrev64q_x_u8, vrhaddq_x_s16, vrhaddq_x_s32, vrhaddq_x_s8, vrhaddq_x_u16, vrhaddq_x_u32, vrhaddq_x_u8, vrmulhq_x_s16, vrmulhq_x_s32, vrmulhq_x_s8, vrmulhq_x_u16, vrmulhq_x_u32, vrmulhq_x_u8, vrndaq_x_f16, vrndaq_x_f32, vrndmq_x_f16, vrndmq_x_f32, vrndnq_x_f16, vrndnq_x_f32, vrndpq_x_f16, vrndpq_x_f32, vrndq_x_f16, vrndq_x_f32, vrndxq_x_f16, vrndxq_x_f32, vrshlq_x_s16, vrshlq_x_s32, vrshlq_x_s8, vrshlq_x_u16, vrshlq_x_u32, vrshlq_x_u8, vrshrq_x_n_s16, vrshrq_x_n_s32, vrshrq_x_n_s8, vrshrq_x_n_u16, vrshrq_x_n_u32, vrshrq_x_n_u8, vshllbq_x_n_s16, vshllbq_x_n_s8, vshllbq_x_n_u16, vshllbq_x_n_u8, vshlltq_x_n_s16, vshlltq_x_n_s8, vshlltq_x_n_u16, vshlltq_x_n_u8, vshlq_x_n_s16, vshlq_x_n_s32, vshlq_x_n_s8, vshlq_x_n_u16, vshlq_x_n_u32, vshlq_x_n_u8, vshlq_x_s16, vshlq_x_s32, vshlq_x_s8, vshlq_x_u16, vshlq_x_u32, vshlq_x_u8, vshrq_x_n_s16, vshrq_x_n_s32, vshrq_x_n_s8, vshrq_x_n_u16, vshrq_x_n_u32, vshrq_x_n_u8, vsubq_x_f16, vsubq_x_f32, vsubq_x_n_f16, vsubq_x_n_f32, vsubq_x_n_s16, vsubq_x_n_s32, vsubq_x_n_s8, vsubq_x_n_u16, vsubq_x_n_u32, vsubq_x_n_u8, vsubq_x_s16, vsubq_x_s32, vsubq_x_s8, vsubq_x_u16, vsubq_x_u32, vsubq_x_u8. 
Please refer to M-profile Vector Extension (MVE) intrinsics [1] for more details. [1] https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics 2020-03-20 Srinath Parvathaneni <srinath.parvathaneni@arm.com> * config/arm/arm_mve.h (vddupq_x_n_u8): Define macro. (vddupq_x_n_u16): Likewise. (vddupq_x_n_u32): Likewise. (vddupq_x_wb_u8): Likewise. (vddupq_x_wb_u16): Likewise. (vddupq_x_wb_u32): Likewise. (vdwdupq_x_n_u8): Likewise. (vdwdupq_x_n_u16): Likewise. (vdwdupq_x_n_u32): Likewise. (vdwdupq_x_wb_u8): Likewise. (vdwdupq_x_wb_u16): Likewise. (vdwdupq_x_wb_u32): Likewise. (vidupq_x_n_u8): Likewise. (vidupq_x_n_u16): Likewise. (vidupq_x_n_u32): Likewise. (vidupq_x_wb_u8): Likewise. (vidupq_x_wb_u16): Likewise. (vidupq_x_wb_u32): Likewise. (viwdupq_x_n_u8): Likewise. (viwdupq_x_n_u16): Likewise. (viwdupq_x_n_u32): Likewise. (viwdupq_x_wb_u8): Likewise. (viwdupq_x_wb_u16): Likewise. (viwdupq_x_wb_u32): Likewise. (vdupq_x_n_s8): Likewise. (vdupq_x_n_s16): Likewise. (vdupq_x_n_s32): Likewise. (vdupq_x_n_u8): Likewise. (vdupq_x_n_u16): Likewise. (vdupq_x_n_u32): Likewise. (vminq_x_s8): Likewise. (vminq_x_s16): Likewise. (vminq_x_s32): Likewise. (vminq_x_u8): Likewise. (vminq_x_u16): Likewise. (vminq_x_u32): Likewise. (vmaxq_x_s8): Likewise. (vmaxq_x_s16): Likewise. (vmaxq_x_s32): Likewise. (vmaxq_x_u8): Likewise. (vmaxq_x_u16): Likewise. (vmaxq_x_u32): Likewise. (vabdq_x_s8): Likewise. (vabdq_x_s16): Likewise. (vabdq_x_s32): Likewise. (vabdq_x_u8): Likewise. (vabdq_x_u16): Likewise. (vabdq_x_u32): Likewise. (vabsq_x_s8): Likewise. (vabsq_x_s16): Likewise. (vabsq_x_s32): Likewise. (vaddq_x_s8): Likewise. (vaddq_x_s16): Likewise. (vaddq_x_s32): Likewise. (vaddq_x_n_s8): Likewise. (vaddq_x_n_s16): Likewise. (vaddq_x_n_s32): Likewise. (vaddq_x_u8): Likewise. (vaddq_x_u16): Likewise. (vaddq_x_u32): Likewise. (vaddq_x_n_u8): Likewise. (vaddq_x_n_u16): Likewise. (vaddq_x_n_u32): Likewise. (vclsq_x_s8): Likewise. (vclsq_x_s16): Likewise. 
(vclsq_x_s32): Likewise. (vclzq_x_s8): Likewise. (vclzq_x_s16): Likewise. (vclzq_x_s32): Likewise. (vclzq_x_u8): Likewise. (vclzq_x_u16): Likewise. (vclzq_x_u32): Likewise. (vnegq_x_s8): Likewise. (vnegq_x_s16): Likewise. (vnegq_x_s32): Likewise. (vmulhq_x_s8): Likewise. (vmulhq_x_s16): Likewise. (vmulhq_x_s32): Likewise. (vmulhq_x_u8): Likewise. (vmulhq_x_u16): Likewise. (vmulhq_x_u32): Likewise. (vmullbq_poly_x_p8): Likewise. (vmullbq_poly_x_p16): Likewise. (vmullbq_int_x_s8): Likewise. (vmullbq_int_x_s16): Likewise. (vmullbq_int_x_s32): Likewise. (vmullbq_int_x_u8): Likewise. (vmullbq_int_x_u16): Likewise. (vmullbq_int_x_u32): Likewise. (vmulltq_poly_x_p8): Likewise. (vmulltq_poly_x_p16): Likewise. (vmulltq_int_x_s8): Likewise. (vmulltq_int_x_s16): Likewise. (vmulltq_int_x_s32): Likewise. (vmulltq_int_x_u8): Likewise. (vmulltq_int_x_u16): Likewise. (vmulltq_int_x_u32): Likewise. (vmulq_x_s8): Likewise. (vmulq_x_s16): Likewise. (vmulq_x_s32): Likewise. (vmulq_x_n_s8): Likewise. (vmulq_x_n_s16): Likewise. (vmulq_x_n_s32): Likewise. (vmulq_x_u8): Likewise. (vmulq_x_u16): Likewise. (vmulq_x_u32): Likewise. (vmulq_x_n_u8): Likewise. (vmulq_x_n_u16): Likewise. (vmulq_x_n_u32): Likewise. (vsubq_x_s8): Likewise. (vsubq_x_s16): Likewise. (vsubq_x_s32): Likewise. (vsubq_x_n_s8): Likewise. (vsubq_x_n_s16): Likewise. (vsubq_x_n_s32): Likewise. (vsubq_x_u8): Likewise. (vsubq_x_u16): Likewise. (vsubq_x_u32): Likewise. (vsubq_x_n_u8): Likewise. (vsubq_x_n_u16): Likewise. (vsubq_x_n_u32): Likewise. (vcaddq_rot90_x_s8): Likewise. (vcaddq_rot90_x_s16): Likewise. (vcaddq_rot90_x_s32): Likewise. (vcaddq_rot90_x_u8): Likewise. (vcaddq_rot90_x_u16): Likewise. (vcaddq_rot90_x_u32): Likewise. (vcaddq_rot270_x_s8): Likewise. (vcaddq_rot270_x_s16): Likewise. (vcaddq_rot270_x_s32): Likewise. (vcaddq_rot270_x_u8): Likewise. (vcaddq_rot270_x_u16): Likewise. (vcaddq_rot270_x_u32): Likewise. (vhaddq_x_n_s8): Likewise. (vhaddq_x_n_s16): Likewise. (vhaddq_x_n_s32): Likewise. 
(vhaddq_x_n_u8): Likewise. (vhaddq_x_n_u16): Likewise. (vhaddq_x_n_u32): Likewise. (vhaddq_x_s8): Likewise. (vhaddq_x_s16): Likewise. (vhaddq_x_s32): Likewise. (vhaddq_x_u8): Likewise. (vhaddq_x_u16): Likewise. (vhaddq_x_u32): Likewise. (vhcaddq_rot90_x_s8): Likewise. (vhcaddq_rot90_x_s16): Likewise. (vhcaddq_rot90_x_s32): Likewise. (vhcaddq_rot270_x_s8): Likewise. (vhcaddq_rot270_x_s16): Likewise. (vhcaddq_rot270_x_s32): Likewise. (vhsubq_x_n_s8): Likewise. (vhsubq_x_n_s16): Likewise. (vhsubq_x_n_s32): Likewise. (vhsubq_x_n_u8): Likewise. (vhsubq_x_n_u16): Likewise. (vhsubq_x_n_u32): Likewise. (vhsubq_x_s8): Likewise. (vhsubq_x_s16): Likewise. (vhsubq_x_s32): Likewise. (vhsubq_x_u8): Likewise. (vhsubq_x_u16): Likewise. (vhsubq_x_u32): Likewise. (vrhaddq_x_s8): Likewise. (vrhaddq_x_s16): Likewise. (vrhaddq_x_s32): Likewise. (vrhaddq_x_u8): Likewise. (vrhaddq_x_u16): Likewise. (vrhaddq_x_u32): Likewise. (vrmulhq_x_s8): Likewise. (vrmulhq_x_s16): Likewise. (vrmulhq_x_s32): Likewise. (vrmulhq_x_u8): Likewise. (vrmulhq_x_u16): Likewise. (vrmulhq_x_u32): Likewise. (vandq_x_s8): Likewise. (vandq_x_s16): Likewise. (vandq_x_s32): Likewise. (vandq_x_u8): Likewise. (vandq_x_u16): Likewise. (vandq_x_u32): Likewise. (vbicq_x_s8): Likewise. (vbicq_x_s16): Likewise. (vbicq_x_s32): Likewise. (vbicq_x_u8): Likewise. (vbicq_x_u16): Likewise. (vbicq_x_u32): Likewise. (vbrsrq_x_n_s8): Likewise. (vbrsrq_x_n_s16): Likewise. (vbrsrq_x_n_s32): Likewise. (vbrsrq_x_n_u8): Likewise. (vbrsrq_x_n_u16): Likewise. (vbrsrq_x_n_u32): Likewise. (veorq_x_s8): Likewise. (veorq_x_s16): Likewise. (veorq_x_s32): Likewise. (veorq_x_u8): Likewise. (veorq_x_u16): Likewise. (veorq_x_u32): Likewise. (vmovlbq_x_s8): Likewise. (vmovlbq_x_s16): Likewise. (vmovlbq_x_u8): Likewise. (vmovlbq_x_u16): Likewise. (vmovltq_x_s8): Likewise. (vmovltq_x_s16): Likewise. (vmovltq_x_u8): Likewise. (vmovltq_x_u16): Likewise. (vmvnq_x_s8): Likewise. (vmvnq_x_s16): Likewise. (vmvnq_x_s32): Likewise. (vmvnq_x_u8): Likewise. 
(vmvnq_x_u16): Likewise. (vmvnq_x_u32): Likewise. (vmvnq_x_n_s16): Likewise. (vmvnq_x_n_s32): Likewise. (vmvnq_x_n_u16): Likewise. (vmvnq_x_n_u32): Likewise. (vornq_x_s8): Likewise. (vornq_x_s16): Likewise. (vornq_x_s32): Likewise. (vornq_x_u8): Likewise. (vornq_x_u16): Likewise. (vornq_x_u32): Likewise. (vorrq_x_s8): Likewise. (vorrq_x_s16): Likewise. (vorrq_x_s32): Likewise. (vorrq_x_u8): Likewise. (vorrq_x_u16): Likewise. (vorrq_x_u32): Likewise. (vrev16q_x_s8): Likewise. (vrev16q_x_u8): Likewise. (vrev32q_x_s8): Likewise. (vrev32q_x_s16): Likewise. (vrev32q_x_u8): Likewise. (vrev32q_x_u16): Likewise. (vrev64q_x_s8): Likewise. (vrev64q_x_s16): Likewise. (vrev64q_x_s32): Likewise. (vrev64q_x_u8): Likewise. (vrev64q_x_u16): Likewise. (vrev64q_x_u32): Likewise. (vrshlq_x_s8): Likewise. (vrshlq_x_s16): Likewise. (vrshlq_x_s32): Likewise. (vrshlq_x_u8): Likewise. (vrshlq_x_u16): Likewise. (vrshlq_x_u32): Likewise. (vshllbq_x_n_s8): Likewise. (vshllbq_x_n_s16): Likewise. (vshllbq_x_n_u8): Likewise. (vshllbq_x_n_u16): Likewise. (vshlltq_x_n_s8): Likewise. (vshlltq_x_n_s16): Likewise. (vshlltq_x_n_u8): Likewise. (vshlltq_x_n_u16): Likewise. (vshlq_x_s8): Likewise. (vshlq_x_s16): Likewise. (vshlq_x_s32): Likewise. (vshlq_x_u8): Likewise. (vshlq_x_u16): Likewise. (vshlq_x_u32): Likewise. (vshlq_x_n_s8): Likewise. (vshlq_x_n_s16): Likewise. (vshlq_x_n_s32): Likewise. (vshlq_x_n_u8): Likewise. (vshlq_x_n_u16): Likewise. (vshlq_x_n_u32): Likewise. (vrshrq_x_n_s8): Likewise. (vrshrq_x_n_s16): Likewise. (vrshrq_x_n_s32): Likewise. (vrshrq_x_n_u8): Likewise. (vrshrq_x_n_u16): Likewise. (vrshrq_x_n_u32): Likewise. (vshrq_x_n_s8): Likewise. (vshrq_x_n_s16): Likewise. (vshrq_x_n_s32): Likewise. (vshrq_x_n_u8): Likewise. (vshrq_x_n_u16): Likewise. (vshrq_x_n_u32): Likewise. (vdupq_x_n_f16): Likewise. (vdupq_x_n_f32): Likewise. (vminnmq_x_f16): Likewise. (vminnmq_x_f32): Likewise. (vmaxnmq_x_f16): Likewise. (vmaxnmq_x_f32): Likewise. (vabdq_x_f16): Likewise. (vabdq_x_f32): Likewise. 
(vabsq_x_f16): Likewise. (vabsq_x_f32): Likewise. (vaddq_x_f16): Likewise. (vaddq_x_f32): Likewise. (vaddq_x_n_f16): Likewise. (vaddq_x_n_f32): Likewise. (vnegq_x_f16): Likewise. (vnegq_x_f32): Likewise. (vmulq_x_f16): Likewise. (vmulq_x_f32): Likewise. (vmulq_x_n_f16): Likewise. (vmulq_x_n_f32): Likewise. (vsubq_x_f16): Likewise. (vsubq_x_f32): Likewise. (vsubq_x_n_f16): Likewise. (vsubq_x_n_f32): Likewise. (vcaddq_rot90_x_f16): Likewise. (vcaddq_rot90_x_f32): Likewise. (vcaddq_rot270_x_f16): Likewise. (vcaddq_rot270_x_f32): Likewise. (vcmulq_x_f16): Likewise. (vcmulq_x_f32): Likewise. (vcmulq_rot90_x_f16): Likewise. (vcmulq_rot90_x_f32): Likewise. (vcmulq_rot180_x_f16): Likewise. (vcmulq_rot180_x_f32): Likewise. (vcmulq_rot270_x_f16): Likewise. (vcmulq_rot270_x_f32): Likewise. (vcvtaq_x_s16_f16): Likewise. (vcvtaq_x_s32_f32): Likewise. (vcvtaq_x_u16_f16): Likewise. (vcvtaq_x_u32_f32): Likewise. (vcvtnq_x_s16_f16): Likewise. (vcvtnq_x_s32_f32): Likewise. (vcvtnq_x_u16_f16): Likewise. (vcvtnq_x_u32_f32): Likewise. (vcvtpq_x_s16_f16): Likewise. (vcvtpq_x_s32_f32): Likewise. (vcvtpq_x_u16_f16): Likewise. (vcvtpq_x_u32_f32): Likewise. (vcvtmq_x_s16_f16): Likewise. (vcvtmq_x_s32_f32): Likewise. (vcvtmq_x_u16_f16): Likewise. (vcvtmq_x_u32_f32): Likewise. (vcvtbq_x_f32_f16): Likewise. (vcvttq_x_f32_f16): Likewise. (vcvtq_x_f16_u16): Likewise. (vcvtq_x_f16_s16): Likewise. (vcvtq_x_f32_s32): Likewise. (vcvtq_x_f32_u32): Likewise. (vcvtq_x_n_f16_s16): Likewise. (vcvtq_x_n_f16_u16): Likewise. (vcvtq_x_n_f32_s32): Likewise. (vcvtq_x_n_f32_u32): Likewise. (vcvtq_x_s16_f16): Likewise. (vcvtq_x_s32_f32): Likewise. (vcvtq_x_u16_f16): Likewise. (vcvtq_x_u32_f32): Likewise. (vcvtq_x_n_s16_f16): Likewise. (vcvtq_x_n_s32_f32): Likewise. (vcvtq_x_n_u16_f16): Likewise. (vcvtq_x_n_u32_f32): Likewise. (vrndq_x_f16): Likewise. (vrndq_x_f32): Likewise. (vrndnq_x_f16): Likewise. (vrndnq_x_f32): Likewise. (vrndmq_x_f16): Likewise. (vrndmq_x_f32): Likewise. (vrndpq_x_f16): Likewise. 
(vrndpq_x_f32): Likewise. (vrndaq_x_f16): Likewise. (vrndaq_x_f32): Likewise. (vrndxq_x_f16): Likewise. (vrndxq_x_f32): Likewise. (vandq_x_f16): Likewise. (vandq_x_f32): Likewise. (vbicq_x_f16): Likewise. (vbicq_x_f32): Likewise. (vbrsrq_x_n_f16): Likewise. (vbrsrq_x_n_f32): Likewise. (veorq_x_f16): Likewise. (veorq_x_f32): Likewise. (vornq_x_f16): Likewise. (vornq_x_f32): Likewise. (vorrq_x_f16): Likewise. (vorrq_x_f32): Likewise. (vrev32q_x_f16): Likewise. (vrev64q_x_f16): Likewise. (vrev64q_x_f32): Likewise. (__arm_vddupq_x_n_u8): Define intrinsic. (__arm_vddupq_x_n_u16): Likewise. (__arm_vddupq_x_n_u32): Likewise. (__arm_vddupq_x_wb_u8): Likewise. (__arm_vddupq_x_wb_u16): Likewise. (__arm_vddupq_x_wb_u32): Likewise. (__arm_vdwdupq_x_n_u8): Likewise. (__arm_vdwdupq_x_n_u16): Likewise. (__arm_vdwdupq_x_n_u32): Likewise. (__arm_vdwdupq_x_wb_u8): Likewise. (__arm_vdwdupq_x_wb_u16): Likewise. (__arm_vdwdupq_x_wb_u32): Likewise. (__arm_vidupq_x_n_u8): Likewise. (__arm_vidupq_x_n_u16): Likewise. (__arm_vidupq_x_n_u32): Likewise. (__arm_vidupq_x_wb_u8): Likewise. (__arm_vidupq_x_wb_u16): Likewise. (__arm_vidupq_x_wb_u32): Likewise. (__arm_viwdupq_x_n_u8): Likewise. (__arm_viwdupq_x_n_u16): Likewise. (__arm_viwdupq_x_n_u32): Likewise. (__arm_viwdupq_x_wb_u8): Likewise. (__arm_viwdupq_x_wb_u16): Likewise. (__arm_viwdupq_x_wb_u32): Likewise. (__arm_vdupq_x_n_s8): Likewise. (__arm_vdupq_x_n_s16): Likewise. (__arm_vdupq_x_n_s32): Likewise. (__arm_vdupq_x_n_u8): Likewise. (__arm_vdupq_x_n_u16): Likewise. (__arm_vdupq_x_n_u32): Likewise. (__arm_vminq_x_s8): Likewise. (__arm_vminq_x_s16): Likewise. (__arm_vminq_x_s32): Likewise. (__arm_vminq_x_u8): Likewise. (__arm_vminq_x_u16): Likewise. (__arm_vminq_x_u32): Likewise. (__arm_vmaxq_x_s8): Likewise. (__arm_vmaxq_x_s16): Likewise. (__arm_vmaxq_x_s32): Likewise. (__arm_vmaxq_x_u8): Likewise. (__arm_vmaxq_x_u16): Likewise. (__arm_vmaxq_x_u32): Likewise. (__arm_vabdq_x_s8): Likewise. (__arm_vabdq_x_s16): Likewise. 
(__arm_vabdq_x_s32): Likewise. (__arm_vabdq_x_u8): Likewise. (__arm_vabdq_x_u16): Likewise. (__arm_vabdq_x_u32): Likewise. (__arm_vabsq_x_s8): Likewise. (__arm_vabsq_x_s16): Likewise. (__arm_vabsq_x_s32): Likewise. (__arm_vaddq_x_s8): Likewise. (__arm_vaddq_x_s16): Likewise. (__arm_vaddq_x_s32): Likewise. (__arm_vaddq_x_n_s8): Likewise. (__arm_vaddq_x_n_s16): Likewise. (__arm_vaddq_x_n_s32): Likewise. (__arm_vaddq_x_u8): Likewise. (__arm_vaddq_x_u16): Likewise. (__arm_vaddq_x_u32): Likewise. (__arm_vaddq_x_n_u8): Likewise. (__arm_vaddq_x_n_u16): Likewise. (__arm_vaddq_x_n_u32): Likewise. (__arm_vclsq_x_s8): Likewise. (__arm_vclsq_x_s16): Likewise. (__arm_vclsq_x_s32): Likewise. (__arm_vclzq_x_s8): Likewise. (__arm_vclzq_x_s16): Likewise. (__arm_vclzq_x_s32): Likewise. (__arm_vclzq_x_u8): Likewise. (__arm_vclzq_x_u16): Likewise. (__arm_vclzq_x_u32): Likewise. (__arm_vnegq_x_s8): Likewise. (__arm_vnegq_x_s16): Likewise. (__arm_vnegq_x_s32): Likewise. (__arm_vmulhq_x_s8): Likewise. (__arm_vmulhq_x_s16): Likewise. (__arm_vmulhq_x_s32): Likewise. (__arm_vmulhq_x_u8): Likewise. (__arm_vmulhq_x_u16): Likewise. (__arm_vmulhq_x_u32): Likewise. (__arm_vmullbq_poly_x_p8): Likewise. (__arm_vmullbq_poly_x_p16): Likewise. (__arm_vmullbq_int_x_s8): Likewise. (__arm_vmullbq_int_x_s16): Likewise. (__arm_vmullbq_int_x_s32): Likewise. (__arm_vmullbq_int_x_u8): Likewise. (__arm_vmullbq_int_x_u16): Likewise. (__arm_vmullbq_int_x_u32): Likewise. (__arm_vmulltq_poly_x_p8): Likewise. (__arm_vmulltq_poly_x_p16): Likewise. (__arm_vmulltq_int_x_s8): Likewise. (__arm_vmulltq_int_x_s16): Likewise. (__arm_vmulltq_int_x_s32): Likewise. (__arm_vmulltq_int_x_u8): Likewise. (__arm_vmulltq_int_x_u16): Likewise. (__arm_vmulltq_int_x_u32): Likewise. (__arm_vmulq_x_s8): Likewise. (__arm_vmulq_x_s16): Likewise. (__arm_vmulq_x_s32): Likewise. (__arm_vmulq_x_n_s8): Likewise. (__arm_vmulq_x_n_s16): Likewise. (__arm_vmulq_x_n_s32): Likewise. (__arm_vmulq_x_u8): Likewise. (__arm_vmulq_x_u16): Likewise. 
(__arm_vmulq_x_u32): Likewise. (__arm_vmulq_x_n_u8): Likewise. (__arm_vmulq_x_n_u16): Likewise. (__arm_vmulq_x_n_u32): Likewise. (__arm_vsubq_x_s8): Likewise. (__arm_vsubq_x_s16): Likewise. (__arm_vsubq_x_s32): Likewise. (__arm_vsubq_x_n_s8): Likewise. (__arm_vsubq_x_n_s16): Likewise. (__arm_vsubq_x_n_s32): Likewise. (__arm_vsubq_x_u8): Likewise. (__arm_vsubq_x_u16): Likewise. (__arm_vsubq_x_u32): Likewise. (__arm_vsubq_x_n_u8): Likewise. (__arm_vsubq_x_n_u16): Likewise. (__arm_vsubq_x_n_u32): Likewise. (__arm_vcaddq_rot90_x_s8): Likewise. (__arm_vcaddq_rot90_x_s16): Likewise. (__arm_vcaddq_rot90_x_s32): Likewise. (__arm_vcaddq_rot90_x_u8): Likewise. (__arm_vcaddq_rot90_x_u16): Likewise. (__arm_vcaddq_rot90_x_u32): Likewise. (__arm_vcaddq_rot270_x_s8): Likewise. (__arm_vcaddq_rot270_x_s16): Likewise. (__arm_vcaddq_rot270_x_s32): Likewise. (__arm_vcaddq_rot270_x_u8): Likewise. (__arm_vcaddq_rot270_x_u16): Likewise. (__arm_vcaddq_rot270_x_u32): Likewise. (__arm_vhaddq_x_n_s8): Likewise. (__arm_vhaddq_x_n_s16): Likewise. (__arm_vhaddq_x_n_s32): Likewise. (__arm_vhaddq_x_n_u8): Likewise. (__arm_vhaddq_x_n_u16): Likewise. (__arm_vhaddq_x_n_u32): Likewise. (__arm_vhaddq_x_s8): Likewise. (__arm_vhaddq_x_s16): Likewise. (__arm_vhaddq_x_s32): Likewise. (__arm_vhaddq_x_u8): Likewise. (__arm_vhaddq_x_u16): Likewise. (__arm_vhaddq_x_u32): Likewise. (__arm_vhcaddq_rot90_x_s8): Likewise. (__arm_vhcaddq_rot90_x_s16): Likewise. (__arm_vhcaddq_rot90_x_s32): Likewise. (__arm_vhcaddq_rot270_x_s8): Likewise. (__arm_vhcaddq_rot270_x_s16): Likewise. (__arm_vhcaddq_rot270_x_s32): Likewise. (__arm_vhsubq_x_n_s8): Likewise. (__arm_vhsubq_x_n_s16): Likewise. (__arm_vhsubq_x_n_s32): Likewise. (__arm_vhsubq_x_n_u8): Likewise. (__arm_vhsubq_x_n_u16): Likewise. (__arm_vhsubq_x_n_u32): Likewise. (__arm_vhsubq_x_s8): Likewise. (__arm_vhsubq_x_s16): Likewise. (__arm_vhsubq_x_s32): Likewise. (__arm_vhsubq_x_u8): Likewise. (__arm_vhsubq_x_u16): Likewise. (__arm_vhsubq_x_u32): Likewise. 
(__arm_vrhaddq_x_s8): Likewise. (__arm_vrhaddq_x_s16): Likewise. (__arm_vrhaddq_x_s32): Likewise. (__arm_vrhaddq_x_u8): Likewise. (__arm_vrhaddq_x_u16): Likewise. (__arm_vrhaddq_x_u32): Likewise. (__arm_vrmulhq_x_s8): Likewise. (__arm_vrmulhq_x_s16): Likewise. (__arm_vrmulhq_x_s32): Likewise. (__arm_vrmulhq_x_u8): Likewise. (__arm_vrmulhq_x_u16): Likewise. (__arm_vrmulhq_x_u32): Likewise. (__arm_vandq_x_s8): Likewise. (__arm_vandq_x_s16): Likewise. (__arm_vandq_x_s32): Likewise. (__arm_vandq_x_u8): Likewise. (__arm_vandq_x_u16): Likewise. (__arm_vandq_x_u32): Likewise. (__arm_vbicq_x_s8): Likewise. (__arm_vbicq_x_s16): Likewise. (__arm_vbicq_x_s32): Likewise. (__arm_vbicq_x_u8): Likewise. (__arm_vbicq_x_u16): Likewise. (__arm_vbicq_x_u32): Likewise. (__arm_vbrsrq_x_n_s8): Likewise. (__arm_vbrsrq_x_n_s16): Likewise. (__arm_vbrsrq_x_n_s32): Likewise. (__arm_vbrsrq_x_n_u8): Likewise. (__arm_vbrsrq_x_n_u16): Likewise. (__arm_vbrsrq_x_n_u32): Likewise. (__arm_veorq_x_s8): Likewise. (__arm_veorq_x_s16): Likewise. (__arm_veorq_x_s32): Likewise. (__arm_veorq_x_u8): Likewise. (__arm_veorq_x_u16): Likewise. (__arm_veorq_x_u32): Likewise. (__arm_vmovlbq_x_s8): Likewise. (__arm_vmovlbq_x_s16): Likewise. (__arm_vmovlbq_x_u8): Likewise. (__arm_vmovlbq_x_u16): Likewise. (__arm_vmovltq_x_s8): Likewise. (__arm_vmovltq_x_s16): Likewise. (__arm_vmovltq_x_u8): Likewise. (__arm_vmovltq_x_u16): Likewise. (__arm_vmvnq_x_s8): Likewise. (__arm_vmvnq_x_s16): Likewise. (__arm_vmvnq_x_s32): Likewise. (__arm_vmvnq_x_u8): Likewise. (__arm_vmvnq_x_u16): Likewise. (__arm_vmvnq_x_u32): Likewise. (__arm_vmvnq_x_n_s16): Likewise. (__arm_vmvnq_x_n_s32): Likewise. (__arm_vmvnq_x_n_u16): Likewise. (__arm_vmvnq_x_n_u32): Likewise. (__arm_vornq_x_s8): Likewise. (__arm_vornq_x_s16): Likewise. (__arm_vornq_x_s32): Likewise. (__arm_vornq_x_u8): Likewise. (__arm_vornq_x_u16): Likewise. (__arm_vornq_x_u32): Likewise. (__arm_vorrq_x_s8): Likewise. (__arm_vorrq_x_s16): Likewise. (__arm_vorrq_x_s32): Likewise. 
(__arm_vorrq_x_u8): Likewise. (__arm_vorrq_x_u16): Likewise. (__arm_vorrq_x_u32): Likewise. (__arm_vrev16q_x_s8): Likewise. (__arm_vrev16q_x_u8): Likewise. (__arm_vrev32q_x_s8): Likewise. (__arm_vrev32q_x_s16): Likewise. (__arm_vrev32q_x_u8): Likewise. (__arm_vrev32q_x_u16): Likewise. (__arm_vrev64q_x_s8): Likewise. (__arm_vrev64q_x_s16): Likewise. (__arm_vrev64q_x_s32): Likewise. (__arm_vrev64q_x_u8): Likewise. (__arm_vrev64q_x_u16): Likewise. (__arm_vrev64q_x_u32): Likewise. (__arm_vrshlq_x_s8): Likewise. (__arm_vrshlq_x_s16): Likewise. (__arm_vrshlq_x_s32): Likewise. (__arm_vrshlq_x_u8): Likewise. (__arm_vrshlq_x_u16): Likewise. (__arm_vrshlq_x_u32): Likewise. (__arm_vshllbq_x_n_s8): Likewise. (__arm_vshllbq_x_n_s16): Likewise. (__arm_vshllbq_x_n_u8): Likewise. (__arm_vshllbq_x_n_u16): Likewise. (__arm_vshlltq_x_n_s8): Likewise. (__arm_vshlltq_x_n_s16): Likewise. (__arm_vshlltq_x_n_u8): Likewise. (__arm_vshlltq_x_n_u16): Likewise. (__arm_vshlq_x_s8): Likewise. (__arm_vshlq_x_s16): Likewise. (__arm_vshlq_x_s32): Likewise. (__arm_vshlq_x_u8): Likewise. (__arm_vshlq_x_u16): Likewise. (__arm_vshlq_x_u32): Likewise. (__arm_vshlq_x_n_s8): Likewise. (__arm_vshlq_x_n_s16): Likewise. (__arm_vshlq_x_n_s32): Likewise. (__arm_vshlq_x_n_u8): Likewise. (__arm_vshlq_x_n_u16): Likewise. (__arm_vshlq_x_n_u32): Likewise. (__arm_vrshrq_x_n_s8): Likewise. (__arm_vrshrq_x_n_s16): Likewise. (__arm_vrshrq_x_n_s32): Likewise. (__arm_vrshrq_x_n_u8): Likewise. (__arm_vrshrq_x_n_u16): Likewise. (__arm_vrshrq_x_n_u32): Likewise. (__arm_vshrq_x_n_s8): Likewise. (__arm_vshrq_x_n_s16): Likewise. (__arm_vshrq_x_n_s32): Likewise. (__arm_vshrq_x_n_u8): Likewise. (__arm_vshrq_x_n_u16): Likewise. (__arm_vshrq_x_n_u32): Likewise. (__arm_vdupq_x_n_f16): Likewise. (__arm_vdupq_x_n_f32): Likewise. (__arm_vminnmq_x_f16): Likewise. (__arm_vminnmq_x_f32): Likewise. (__arm_vmaxnmq_x_f16): Likewise. (__arm_vmaxnmq_x_f32): Likewise. (__arm_vabdq_x_f16): Likewise. (__arm_vabdq_x_f32): Likewise. 
(__arm_vabsq_x_f16): Likewise. (__arm_vabsq_x_f32): Likewise. (__arm_vaddq_x_f16): Likewise. (__arm_vaddq_x_f32): Likewise. (__arm_vaddq_x_n_f16): Likewise. (__arm_vaddq_x_n_f32): Likewise. (__arm_vnegq_x_f16): Likewise. (__arm_vnegq_x_f32): Likewise. (__arm_vmulq_x_f16): Likewise. (__arm_vmulq_x_f32): Likewise. (__arm_vmulq_x_n_f16): Likewise. (__arm_vmulq_x_n_f32): Likewise. (__arm_vsubq_x_f16): Likewise. (__arm_vsubq_x_f32): Likewise. (__arm_vsubq_x_n_f16): Likewise. (__arm_vsubq_x_n_f32): Likewise. (__arm_vcaddq_rot90_x_f16): Likewise. (__arm_vcaddq_rot90_x_f32): Likewise. (__arm_vcaddq_rot270_x_f16): Likewise. (__arm_vcaddq_rot270_x_f32): Likewise. (__arm_vcmulq_x_f16): Likewise. (__arm_vcmulq_x_f32): Likewise. (__arm_vcmulq_rot90_x_f16): Likewise. (__arm_vcmulq_rot90_x_f32): Likewise. (__arm_vcmulq_rot180_x_f16): Likewise. (__arm_vcmulq_rot180_x_f32): Likewise. (__arm_vcmulq_rot270_x_f16): Likewise. (__arm_vcmulq_rot270_x_f32): Likewise. (__arm_vcvtaq_x_s16_f16): Likewise. (__arm_vcvtaq_x_s32_f32): Likewise. (__arm_vcvtaq_x_u16_f16): Likewise. (__arm_vcvtaq_x_u32_f32): Likewise. (__arm_vcvtnq_x_s16_f16): Likewise. (__arm_vcvtnq_x_s32_f32): Likewise. (__arm_vcvtnq_x_u16_f16): Likewise. (__arm_vcvtnq_x_u32_f32): Likewise. (__arm_vcvtpq_x_s16_f16): Likewise. (__arm_vcvtpq_x_s32_f32): Likewise. (__arm_vcvtpq_x_u16_f16): Likewise. (__arm_vcvtpq_x_u32_f32): Likewise. (__arm_vcvtmq_x_s16_f16): Likewise. (__arm_vcvtmq_x_s32_f32): Likewise. (__arm_vcvtmq_x_u16_f16): Likewise. (__arm_vcvtmq_x_u32_f32): Likewise. (__arm_vcvtbq_x_f32_f16): Likewise. (__arm_vcvttq_x_f32_f16): Likewise. (__arm_vcvtq_x_f16_u16): Likewise. (__arm_vcvtq_x_f16_s16): Likewise. (__arm_vcvtq_x_f32_s32): Likewise. (__arm_vcvtq_x_f32_u32): Likewise. (__arm_vcvtq_x_n_f16_s16): Likewise. (__arm_vcvtq_x_n_f16_u16): Likewise. (__arm_vcvtq_x_n_f32_s32): Likewise. (__arm_vcvtq_x_n_f32_u32): Likewise. (__arm_vcvtq_x_s16_f16): Likewise. (__arm_vcvtq_x_s32_f32): Likewise. (__arm_vcvtq_x_u16_f16): Likewise. 
(__arm_vcvtq_x_u32_f32): Likewise. (__arm_vcvtq_x_n_s16_f16): Likewise. (__arm_vcvtq_x_n_s32_f32): Likewise. (__arm_vcvtq_x_n_u16_f16): Likewise. (__arm_vcvtq_x_n_u32_f32): Likewise. (__arm_vrndq_x_f16): Likewise. (__arm_vrndq_x_f32): Likewise. (__arm_vrndnq_x_f16): Likewise. (__arm_vrndnq_x_f32): Likewise. (__arm_vrndmq_x_f16): Likewise. (__arm_vrndmq_x_f32): Likewise. (__arm_vrndpq_x_f16): Likewise. (__arm_vrndpq_x_f32): Likewise. (__arm_vrndaq_x_f16): Likewise. (__arm_vrndaq_x_f32): Likewise. (__arm_vrndxq_x_f16): Likewise. (__arm_vrndxq_x_f32): Likewise. (__arm_vandq_x_f16): Likewise. (__arm_vandq_x_f32): Likewise. (__arm_vbicq_x_f16): Likewise. (__arm_vbicq_x_f32): Likewise. (__arm_vbrsrq_x_n_f16): Likewise. (__arm_vbrsrq_x_n_f32): Likewise. (__arm_veorq_x_f16): Likewise. (__arm_veorq_x_f32): Likewise. (__arm_vornq_x_f16): Likewise. (__arm_vornq_x_f32): Likewise. (__arm_vorrq_x_f16): Likewise. (__arm_vorrq_x_f32): Likewise. (__arm_vrev32q_x_f16): Likewise. (__arm_vrev64q_x_f16): Likewise. (__arm_vrev64q_x_f32): Likewise. (vabdq_x): Define polymorphic variant. (vabsq_x): Likewise. (vaddq_x): Likewise. (vandq_x): Likewise. (vbicq_x): Likewise. (vbrsrq_x): Likewise. (vcaddq_rot270_x): Likewise. (vcaddq_rot90_x): Likewise. (vcmulq_rot180_x): Likewise. (vcmulq_rot270_x): Likewise. (vcmulq_x): Likewise. (vcvtq_x): Likewise. (vcvtq_x_n): Likewise. (vcvtnq_m): Likewise. (veorq_x): Likewise. (vmaxnmq_x): Likewise. (vminnmq_x): Likewise. (vmulq_x): Likewise. (vnegq_x): Likewise. (vornq_x): Likewise. (vorrq_x): Likewise. (vrev32q_x): Likewise. (vrev64q_x): Likewise. (vrndaq_x): Likewise. (vrndmq_x): Likewise. (vrndnq_x): Likewise. (vrndpq_x): Likewise. (vrndq_x): Likewise. (vrndxq_x): Likewise. (vsubq_x): Likewise. (vcmulq_rot90_x): Likewise. (vadciq): Likewise. (vclsq_x): Likewise. (vclzq_x): Likewise. (vhaddq_x): Likewise. (vhcaddq_rot270_x): Likewise. (vhcaddq_rot90_x): Likewise. (vhsubq_x): Likewise. (vmaxq_x): Likewise. (vminq_x): Likewise. (vmovlbq_x): Likewise. 
(vmovltq_x): Likewise. (vmulhq_x): Likewise. (vmullbq_int_x): Likewise. (vmullbq_poly_x): Likewise. (vmulltq_int_x): Likewise. (vmulltq_poly_x): Likewise. (vmvnq_x): Likewise. (vrev16q_x): Likewise. (vrhaddq_x): Likewise. (vrmulhq_x): Likewise. (vrshlq_x): Likewise. (vrshrq_x): Likewise. (vshllbq_x): Likewise. (vshlltq_x): Likewise. (vshlq_x_n): Likewise. (vshlq_x): Likewise. (vdwdupq_x_u8): Likewise. (vdwdupq_x_u16): Likewise. (vdwdupq_x_u32): Likewise. (viwdupq_x_u8): Likewise. (viwdupq_x_u16): Likewise. (viwdupq_x_u32): Likewise. (vidupq_x_u8): Likewise. (vddupq_x_u8): Likewise. (vidupq_x_u16): Likewise. (vddupq_x_u16): Likewise. (vidupq_x_u32): Likewise. (vddupq_x_u32): Likewise. (vshrq_x): Likewise. gcc/testsuite/ChangeLog: 2020-03-20 Srinath Parvathaneni <srinath.parvathaneni@arm.com> * gcc.target/arm/mve/intrinsics/vabdq_x_f16.c: New test. * gcc.target/arm/mve/intrinsics/vabdq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vabdq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vabdq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vabdq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vabdq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vabdq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vabdq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vabsq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vabsq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vabsq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vabsq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vabsq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_x_n_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_x_n_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_x_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_x_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_x_n_s8.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vaddq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_x_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_x_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vaddq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vandq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vandq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vandq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vandq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vandq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vandq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vandq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vandq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vbicq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vbicq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vbicq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vbicq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vbicq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vbicq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vbicq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vbicq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f16.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vclsq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vclsq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vclsq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vclzq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vclzq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vclzq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vclzq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vclzq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vclzq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmulq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcmulq_x_f32.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vcvtaq_x_s16_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtaq_x_s32_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtaq_x_u16_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtaq_x_u32_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtbq_x_f32_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtmq_x_s16_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtmq_x_s32_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtmq_x_u16_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtmq_x_u32_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtnq_x_s16_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtnq_x_s32_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtnq_x_u16_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtnq_x_u32_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtpq_x_s16_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtpq_x_s32_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtpq_x_u16_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtpq_x_u32_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtq_x_f16_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtq_x_f16_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtq_x_f32_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtq_x_f32_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtq_x_n_s16_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtq_x_n_s32_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtq_x_n_u16_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtq_x_n_u32_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtq_x_s16_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtq_x_s32_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvtq_x_u16_f16.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vcvtq_x_u32_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vcvttq_x_f32_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vddupq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vddupq_x_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vddupq_x_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vddupq_x_wb_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vddupq_x_wb_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vddupq_x_wb_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vdupq_x_n_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vdupq_x_n_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vdupq_x_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vdupq_x_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vdupq_x_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vdupq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vdupq_x_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vdupq_x_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/veorq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/veorq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/veorq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/veorq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/veorq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/veorq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/veorq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/veorq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_x_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_x_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_x_n_s8.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vhaddq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_x_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_x_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhaddq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_x_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_x_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_x_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_x_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_x_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vhsubq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vidupq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vidupq_x_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vidupq_x_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vidupq_x_wb_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vidupq_x_wb_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vidupq_x_wb_u8.c: Likewise. 
* gcc.target/arm/mve/intrinsics/viwdupq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/viwdupq_x_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/viwdupq_x_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxnmq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxnmq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmaxq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vminnmq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vminnmq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vminq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vminq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vminq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vminq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vminq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vminq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmovlbq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmovlbq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmovlbq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmovlbq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmovltq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmovltq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmovltq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmovltq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulhq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulhq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulhq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulhq_x_u16.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vmulhq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulhq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmullbq_int_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmullbq_int_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmullbq_int_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmullbq_int_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmullbq_int_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmullbq_int_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulltq_int_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulltq_int_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulltq_int_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulltq_int_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulltq_int_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulltq_int_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_x_n_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_x_n_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_x_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_x_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_x_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_x_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_x_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmulq_x_u32.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vmulq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmvnq_x_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmvnq_x_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmvnq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmvnq_x_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmvnq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmvnq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmvnq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vmvnq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vmvnq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vmvnq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vnegq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vnegq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vnegq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vnegq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vnegq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vornq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vornq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vornq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vornq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vornq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vornq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vornq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vornq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vorrq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vorrq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vorrq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vorrq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vorrq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vorrq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vorrq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vorrq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev16q_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev16q_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev32q_x_f16.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vrev32q_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev32q_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev32q_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev32q_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev64q_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev64q_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev64q_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev64q_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev64q_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev64q_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev64q_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrev64q_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrhaddq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrhaddq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrhaddq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrhaddq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrhaddq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrhaddq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmulhq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmulhq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmulhq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmulhq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmulhq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrmulhq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrndaq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrndaq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrndmq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrndmq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrndnq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrndnq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrndpq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrndpq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrndq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrndq_x_f32.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vrndxq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrndxq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshlq_x_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrq_x_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrq_x_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrq_x_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrq_x_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vrshrq_x_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshllbq_x_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshllbq_x_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshllbq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshllbq_x_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlltq_x_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlltq_x_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlltq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlltq_x_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_x_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_x_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_x_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_x_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_x_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vshlq_x_u8.c: Likewise. 
* gcc.target/arm/mve/intrinsics/vshrq_x_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshrq_x_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vshrq_x_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vshrq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vshrq_x_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_x_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_x_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_x_n_f16.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_x_n_f32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_x_n_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_x_n_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_x_n_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_x_n_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_x_n_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_x_n_u8.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_x_s16.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_x_s32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_x_s8.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_x_u16.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_x_u32.c: Likewise. * gcc.target/arm/mve/intrinsics/vsubq_x_u8.c: Likewise.
-rw-r--r--  gcc/ChangeLog  825
-rw-r--r--  gcc/config/arm/arm_mve.h  3871
-rw-r--r--  gcc/testsuite/ChangeLog  378
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_f16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_f32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_f16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_f32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_f16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_f32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_f16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_f32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s8.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u8.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s8.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u8.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_f16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_f32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_f16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_f32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s8.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u8.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_x_f16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_x_f32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_s16_f16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_s32_f32.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_u16_f16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_u32_f32.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtbq_x_f32_f16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_s16_f16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_s32_f32.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_u16_f16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_u32_f32.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_s16_f16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_s32_f32.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_u16_f16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_u32_f32.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_s16_f16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_s32_f32.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_u16_f16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_u32_f32.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f16_s16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f16_u16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f32_s32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f32_u32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_s16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_u16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_s32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_u32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_s16_f16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_s32_f32.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_u16_f16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_u32_f32.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_s16_f16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_s32_f32.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_u16_f16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_u32_f32.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvttq_x_f32_f16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u8.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u16.c  26
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u32.c  26
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u8.c  26
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_f16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_f32.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s32.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s8.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u16.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u32.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u8.c  15
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u8.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u8.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_f16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_f32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s8.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u8.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_n_f16.c  2
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_n_f32.c  2
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_n_f16.c  2
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_n_f32.c  2
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s8.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u8.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s32.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s8.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s8.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u16.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u32.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u8.c  24
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s16.c  23
-rw-r--r--  gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s32.c  23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u16.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u32.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u8.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u16.c26
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u32.c26
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u8.c26
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u16.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u32.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u8.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u16.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u32.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u8.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_x_f16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_x_f32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_x_f16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_x_f32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_s16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_u16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_u8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_s16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_u16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_u8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_f16.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_f32.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_f16.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_f32.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s16.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s32.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s8.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u16.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u32.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u8.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s16.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s32.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s8.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u16.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u32.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u8.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_s16.c15
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_s32.c15
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_u16.c15
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_u32.c15
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s16.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s32.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s8.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u16.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u32.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u8.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_f16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_f32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_f16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_f32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_f16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_f32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_x_u8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_f16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_s16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_u16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_u8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_f16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_f32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_x_f16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_x_f32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_x_f16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_x_f32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_x_f16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_x_f32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_x_f16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_x_f32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_x_f16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_x_f32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_x_f16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_x_f32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u16.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u32.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u8.c23
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s16.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s32.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s8.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u16.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u32.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u8.c24
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_s16.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_s8.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_u16.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_u8.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_s16.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_s8.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_u16.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_u8.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s16.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s32.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s8.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u16.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u32.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u8.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s16.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s32.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s8.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u16.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u32.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u8.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s16.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s32.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s8.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_u16.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_u8.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_f16.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_f32.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_f16.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_f32.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s16.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s32.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s8.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u16.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u32.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u8.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s16.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s32.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s8.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u16.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u32.c16
-rw-r--r--gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u8.c16
382 files changed, 13215 insertions, 20 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 3623455..6bbdf06 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,828 @@
+2020-03-20 Srinath Parvathaneni <srinath.parvathaneni@arm.com>
+
+ * config/arm/arm_mve.h (vddupq_x_n_u8): Define macro.
+ (vddupq_x_n_u16): Likewise.
+ (vddupq_x_n_u32): Likewise.
+ (vddupq_x_wb_u8): Likewise.
+ (vddupq_x_wb_u16): Likewise.
+ (vddupq_x_wb_u32): Likewise.
+ (vdwdupq_x_n_u8): Likewise.
+ (vdwdupq_x_n_u16): Likewise.
+ (vdwdupq_x_n_u32): Likewise.
+ (vdwdupq_x_wb_u8): Likewise.
+ (vdwdupq_x_wb_u16): Likewise.
+ (vdwdupq_x_wb_u32): Likewise.
+ (vidupq_x_n_u8): Likewise.
+ (vidupq_x_n_u16): Likewise.
+ (vidupq_x_n_u32): Likewise.
+ (vidupq_x_wb_u8): Likewise.
+ (vidupq_x_wb_u16): Likewise.
+ (vidupq_x_wb_u32): Likewise.
+ (viwdupq_x_n_u8): Likewise.
+ (viwdupq_x_n_u16): Likewise.
+ (viwdupq_x_n_u32): Likewise.
+ (viwdupq_x_wb_u8): Likewise.
+ (viwdupq_x_wb_u16): Likewise.
+ (viwdupq_x_wb_u32): Likewise.
+ (vdupq_x_n_s8): Likewise.
+ (vdupq_x_n_s16): Likewise.
+ (vdupq_x_n_s32): Likewise.
+ (vdupq_x_n_u8): Likewise.
+ (vdupq_x_n_u16): Likewise.
+ (vdupq_x_n_u32): Likewise.
+ (vminq_x_s8): Likewise.
+ (vminq_x_s16): Likewise.
+ (vminq_x_s32): Likewise.
+ (vminq_x_u8): Likewise.
+ (vminq_x_u16): Likewise.
+ (vminq_x_u32): Likewise.
+ (vmaxq_x_s8): Likewise.
+ (vmaxq_x_s16): Likewise.
+ (vmaxq_x_s32): Likewise.
+ (vmaxq_x_u8): Likewise.
+ (vmaxq_x_u16): Likewise.
+ (vmaxq_x_u32): Likewise.
+ (vabdq_x_s8): Likewise.
+ (vabdq_x_s16): Likewise.
+ (vabdq_x_s32): Likewise.
+ (vabdq_x_u8): Likewise.
+ (vabdq_x_u16): Likewise.
+ (vabdq_x_u32): Likewise.
+ (vabsq_x_s8): Likewise.
+ (vabsq_x_s16): Likewise.
+ (vabsq_x_s32): Likewise.
+ (vaddq_x_s8): Likewise.
+ (vaddq_x_s16): Likewise.
+ (vaddq_x_s32): Likewise.
+ (vaddq_x_n_s8): Likewise.
+ (vaddq_x_n_s16): Likewise.
+ (vaddq_x_n_s32): Likewise.
+ (vaddq_x_u8): Likewise.
+ (vaddq_x_u16): Likewise.
+ (vaddq_x_u32): Likewise.
+ (vaddq_x_n_u8): Likewise.
+ (vaddq_x_n_u16): Likewise.
+ (vaddq_x_n_u32): Likewise.
+ (vclsq_x_s8): Likewise.
+ (vclsq_x_s16): Likewise.
+ (vclsq_x_s32): Likewise.
+ (vclzq_x_s8): Likewise.
+ (vclzq_x_s16): Likewise.
+ (vclzq_x_s32): Likewise.
+ (vclzq_x_u8): Likewise.
+ (vclzq_x_u16): Likewise.
+ (vclzq_x_u32): Likewise.
+ (vnegq_x_s8): Likewise.
+ (vnegq_x_s16): Likewise.
+ (vnegq_x_s32): Likewise.
+ (vmulhq_x_s8): Likewise.
+ (vmulhq_x_s16): Likewise.
+ (vmulhq_x_s32): Likewise.
+ (vmulhq_x_u8): Likewise.
+ (vmulhq_x_u16): Likewise.
+ (vmulhq_x_u32): Likewise.
+ (vmullbq_poly_x_p8): Likewise.
+ (vmullbq_poly_x_p16): Likewise.
+ (vmullbq_int_x_s8): Likewise.
+ (vmullbq_int_x_s16): Likewise.
+ (vmullbq_int_x_s32): Likewise.
+ (vmullbq_int_x_u8): Likewise.
+ (vmullbq_int_x_u16): Likewise.
+ (vmullbq_int_x_u32): Likewise.
+ (vmulltq_poly_x_p8): Likewise.
+ (vmulltq_poly_x_p16): Likewise.
+ (vmulltq_int_x_s8): Likewise.
+ (vmulltq_int_x_s16): Likewise.
+ (vmulltq_int_x_s32): Likewise.
+ (vmulltq_int_x_u8): Likewise.
+ (vmulltq_int_x_u16): Likewise.
+ (vmulltq_int_x_u32): Likewise.
+ (vmulq_x_s8): Likewise.
+ (vmulq_x_s16): Likewise.
+ (vmulq_x_s32): Likewise.
+ (vmulq_x_n_s8): Likewise.
+ (vmulq_x_n_s16): Likewise.
+ (vmulq_x_n_s32): Likewise.
+ (vmulq_x_u8): Likewise.
+ (vmulq_x_u16): Likewise.
+ (vmulq_x_u32): Likewise.
+ (vmulq_x_n_u8): Likewise.
+ (vmulq_x_n_u16): Likewise.
+ (vmulq_x_n_u32): Likewise.
+ (vsubq_x_s8): Likewise.
+ (vsubq_x_s16): Likewise.
+ (vsubq_x_s32): Likewise.
+ (vsubq_x_n_s8): Likewise.
+ (vsubq_x_n_s16): Likewise.
+ (vsubq_x_n_s32): Likewise.
+ (vsubq_x_u8): Likewise.
+ (vsubq_x_u16): Likewise.
+ (vsubq_x_u32): Likewise.
+ (vsubq_x_n_u8): Likewise.
+ (vsubq_x_n_u16): Likewise.
+ (vsubq_x_n_u32): Likewise.
+ (vcaddq_rot90_x_s8): Likewise.
+ (vcaddq_rot90_x_s16): Likewise.
+ (vcaddq_rot90_x_s32): Likewise.
+ (vcaddq_rot90_x_u8): Likewise.
+ (vcaddq_rot90_x_u16): Likewise.
+ (vcaddq_rot90_x_u32): Likewise.
+ (vcaddq_rot270_x_s8): Likewise.
+ (vcaddq_rot270_x_s16): Likewise.
+ (vcaddq_rot270_x_s32): Likewise.
+ (vcaddq_rot270_x_u8): Likewise.
+ (vcaddq_rot270_x_u16): Likewise.
+ (vcaddq_rot270_x_u32): Likewise.
+ (vhaddq_x_n_s8): Likewise.
+ (vhaddq_x_n_s16): Likewise.
+ (vhaddq_x_n_s32): Likewise.
+ (vhaddq_x_n_u8): Likewise.
+ (vhaddq_x_n_u16): Likewise.
+ (vhaddq_x_n_u32): Likewise.
+ (vhaddq_x_s8): Likewise.
+ (vhaddq_x_s16): Likewise.
+ (vhaddq_x_s32): Likewise.
+ (vhaddq_x_u8): Likewise.
+ (vhaddq_x_u16): Likewise.
+ (vhaddq_x_u32): Likewise.
+ (vhcaddq_rot90_x_s8): Likewise.
+ (vhcaddq_rot90_x_s16): Likewise.
+ (vhcaddq_rot90_x_s32): Likewise.
+ (vhcaddq_rot270_x_s8): Likewise.
+ (vhcaddq_rot270_x_s16): Likewise.
+ (vhcaddq_rot270_x_s32): Likewise.
+ (vhsubq_x_n_s8): Likewise.
+ (vhsubq_x_n_s16): Likewise.
+ (vhsubq_x_n_s32): Likewise.
+ (vhsubq_x_n_u8): Likewise.
+ (vhsubq_x_n_u16): Likewise.
+ (vhsubq_x_n_u32): Likewise.
+ (vhsubq_x_s8): Likewise.
+ (vhsubq_x_s16): Likewise.
+ (vhsubq_x_s32): Likewise.
+ (vhsubq_x_u8): Likewise.
+ (vhsubq_x_u16): Likewise.
+ (vhsubq_x_u32): Likewise.
+ (vrhaddq_x_s8): Likewise.
+ (vrhaddq_x_s16): Likewise.
+ (vrhaddq_x_s32): Likewise.
+ (vrhaddq_x_u8): Likewise.
+ (vrhaddq_x_u16): Likewise.
+ (vrhaddq_x_u32): Likewise.
+ (vrmulhq_x_s8): Likewise.
+ (vrmulhq_x_s16): Likewise.
+ (vrmulhq_x_s32): Likewise.
+ (vrmulhq_x_u8): Likewise.
+ (vrmulhq_x_u16): Likewise.
+ (vrmulhq_x_u32): Likewise.
+ (vandq_x_s8): Likewise.
+ (vandq_x_s16): Likewise.
+ (vandq_x_s32): Likewise.
+ (vandq_x_u8): Likewise.
+ (vandq_x_u16): Likewise.
+ (vandq_x_u32): Likewise.
+ (vbicq_x_s8): Likewise.
+ (vbicq_x_s16): Likewise.
+ (vbicq_x_s32): Likewise.
+ (vbicq_x_u8): Likewise.
+ (vbicq_x_u16): Likewise.
+ (vbicq_x_u32): Likewise.
+ (vbrsrq_x_n_s8): Likewise.
+ (vbrsrq_x_n_s16): Likewise.
+ (vbrsrq_x_n_s32): Likewise.
+ (vbrsrq_x_n_u8): Likewise.
+ (vbrsrq_x_n_u16): Likewise.
+ (vbrsrq_x_n_u32): Likewise.
+ (veorq_x_s8): Likewise.
+ (veorq_x_s16): Likewise.
+ (veorq_x_s32): Likewise.
+ (veorq_x_u8): Likewise.
+ (veorq_x_u16): Likewise.
+ (veorq_x_u32): Likewise.
+ (vmovlbq_x_s8): Likewise.
+ (vmovlbq_x_s16): Likewise.
+ (vmovlbq_x_u8): Likewise.
+ (vmovlbq_x_u16): Likewise.
+ (vmovltq_x_s8): Likewise.
+ (vmovltq_x_s16): Likewise.
+ (vmovltq_x_u8): Likewise.
+ (vmovltq_x_u16): Likewise.
+ (vmvnq_x_s8): Likewise.
+ (vmvnq_x_s16): Likewise.
+ (vmvnq_x_s32): Likewise.
+ (vmvnq_x_u8): Likewise.
+ (vmvnq_x_u16): Likewise.
+ (vmvnq_x_u32): Likewise.
+ (vmvnq_x_n_s16): Likewise.
+ (vmvnq_x_n_s32): Likewise.
+ (vmvnq_x_n_u16): Likewise.
+ (vmvnq_x_n_u32): Likewise.
+ (vornq_x_s8): Likewise.
+ (vornq_x_s16): Likewise.
+ (vornq_x_s32): Likewise.
+ (vornq_x_u8): Likewise.
+ (vornq_x_u16): Likewise.
+ (vornq_x_u32): Likewise.
+ (vorrq_x_s8): Likewise.
+ (vorrq_x_s16): Likewise.
+ (vorrq_x_s32): Likewise.
+ (vorrq_x_u8): Likewise.
+ (vorrq_x_u16): Likewise.
+ (vorrq_x_u32): Likewise.
+ (vrev16q_x_s8): Likewise.
+ (vrev16q_x_u8): Likewise.
+ (vrev32q_x_s8): Likewise.
+ (vrev32q_x_s16): Likewise.
+ (vrev32q_x_u8): Likewise.
+ (vrev32q_x_u16): Likewise.
+ (vrev64q_x_s8): Likewise.
+ (vrev64q_x_s16): Likewise.
+ (vrev64q_x_s32): Likewise.
+ (vrev64q_x_u8): Likewise.
+ (vrev64q_x_u16): Likewise.
+ (vrev64q_x_u32): Likewise.
+ (vrshlq_x_s8): Likewise.
+ (vrshlq_x_s16): Likewise.
+ (vrshlq_x_s32): Likewise.
+ (vrshlq_x_u8): Likewise.
+ (vrshlq_x_u16): Likewise.
+ (vrshlq_x_u32): Likewise.
+ (vshllbq_x_n_s8): Likewise.
+ (vshllbq_x_n_s16): Likewise.
+ (vshllbq_x_n_u8): Likewise.
+ (vshllbq_x_n_u16): Likewise.
+ (vshlltq_x_n_s8): Likewise.
+ (vshlltq_x_n_s16): Likewise.
+ (vshlltq_x_n_u8): Likewise.
+ (vshlltq_x_n_u16): Likewise.
+ (vshlq_x_s8): Likewise.
+ (vshlq_x_s16): Likewise.
+ (vshlq_x_s32): Likewise.
+ (vshlq_x_u8): Likewise.
+ (vshlq_x_u16): Likewise.
+ (vshlq_x_u32): Likewise.
+ (vshlq_x_n_s8): Likewise.
+ (vshlq_x_n_s16): Likewise.
+ (vshlq_x_n_s32): Likewise.
+ (vshlq_x_n_u8): Likewise.
+ (vshlq_x_n_u16): Likewise.
+ (vshlq_x_n_u32): Likewise.
+ (vrshrq_x_n_s8): Likewise.
+ (vrshrq_x_n_s16): Likewise.
+ (vrshrq_x_n_s32): Likewise.
+ (vrshrq_x_n_u8): Likewise.
+ (vrshrq_x_n_u16): Likewise.
+ (vrshrq_x_n_u32): Likewise.
+ (vshrq_x_n_s8): Likewise.
+ (vshrq_x_n_s16): Likewise.
+ (vshrq_x_n_s32): Likewise.
+ (vshrq_x_n_u8): Likewise.
+ (vshrq_x_n_u16): Likewise.
+ (vshrq_x_n_u32): Likewise.
+ (vdupq_x_n_f16): Likewise.
+ (vdupq_x_n_f32): Likewise.
+ (vminnmq_x_f16): Likewise.
+ (vminnmq_x_f32): Likewise.
+ (vmaxnmq_x_f16): Likewise.
+ (vmaxnmq_x_f32): Likewise.
+ (vabdq_x_f16): Likewise.
+ (vabdq_x_f32): Likewise.
+ (vabsq_x_f16): Likewise.
+ (vabsq_x_f32): Likewise.
+ (vaddq_x_f16): Likewise.
+ (vaddq_x_f32): Likewise.
+ (vaddq_x_n_f16): Likewise.
+ (vaddq_x_n_f32): Likewise.
+ (vnegq_x_f16): Likewise.
+ (vnegq_x_f32): Likewise.
+ (vmulq_x_f16): Likewise.
+ (vmulq_x_f32): Likewise.
+ (vmulq_x_n_f16): Likewise.
+ (vmulq_x_n_f32): Likewise.
+ (vsubq_x_f16): Likewise.
+ (vsubq_x_f32): Likewise.
+ (vsubq_x_n_f16): Likewise.
+ (vsubq_x_n_f32): Likewise.
+ (vcaddq_rot90_x_f16): Likewise.
+ (vcaddq_rot90_x_f32): Likewise.
+ (vcaddq_rot270_x_f16): Likewise.
+ (vcaddq_rot270_x_f32): Likewise.
+ (vcmulq_x_f16): Likewise.
+ (vcmulq_x_f32): Likewise.
+ (vcmulq_rot90_x_f16): Likewise.
+ (vcmulq_rot90_x_f32): Likewise.
+ (vcmulq_rot180_x_f16): Likewise.
+ (vcmulq_rot180_x_f32): Likewise.
+ (vcmulq_rot270_x_f16): Likewise.
+ (vcmulq_rot270_x_f32): Likewise.
+ (vcvtaq_x_s16_f16): Likewise.
+ (vcvtaq_x_s32_f32): Likewise.
+ (vcvtaq_x_u16_f16): Likewise.
+ (vcvtaq_x_u32_f32): Likewise.
+ (vcvtnq_x_s16_f16): Likewise.
+ (vcvtnq_x_s32_f32): Likewise.
+ (vcvtnq_x_u16_f16): Likewise.
+ (vcvtnq_x_u32_f32): Likewise.
+ (vcvtpq_x_s16_f16): Likewise.
+ (vcvtpq_x_s32_f32): Likewise.
+ (vcvtpq_x_u16_f16): Likewise.
+ (vcvtpq_x_u32_f32): Likewise.
+ (vcvtmq_x_s16_f16): Likewise.
+ (vcvtmq_x_s32_f32): Likewise.
+ (vcvtmq_x_u16_f16): Likewise.
+ (vcvtmq_x_u32_f32): Likewise.
+ (vcvtbq_x_f32_f16): Likewise.
+ (vcvttq_x_f32_f16): Likewise.
+ (vcvtq_x_f16_u16): Likewise.
+ (vcvtq_x_f16_s16): Likewise.
+ (vcvtq_x_f32_s32): Likewise.
+ (vcvtq_x_f32_u32): Likewise.
+ (vcvtq_x_n_f16_s16): Likewise.
+ (vcvtq_x_n_f16_u16): Likewise.
+ (vcvtq_x_n_f32_s32): Likewise.
+ (vcvtq_x_n_f32_u32): Likewise.
+ (vcvtq_x_s16_f16): Likewise.
+ (vcvtq_x_s32_f32): Likewise.
+ (vcvtq_x_u16_f16): Likewise.
+ (vcvtq_x_u32_f32): Likewise.
+ (vcvtq_x_n_s16_f16): Likewise.
+ (vcvtq_x_n_s32_f32): Likewise.
+ (vcvtq_x_n_u16_f16): Likewise.
+ (vcvtq_x_n_u32_f32): Likewise.
+ (vrndq_x_f16): Likewise.
+ (vrndq_x_f32): Likewise.
+ (vrndnq_x_f16): Likewise.
+ (vrndnq_x_f32): Likewise.
+ (vrndmq_x_f16): Likewise.
+ (vrndmq_x_f32): Likewise.
+ (vrndpq_x_f16): Likewise.
+ (vrndpq_x_f32): Likewise.
+ (vrndaq_x_f16): Likewise.
+ (vrndaq_x_f32): Likewise.
+ (vrndxq_x_f16): Likewise.
+ (vrndxq_x_f32): Likewise.
+ (vandq_x_f16): Likewise.
+ (vandq_x_f32): Likewise.
+ (vbicq_x_f16): Likewise.
+ (vbicq_x_f32): Likewise.
+ (vbrsrq_x_n_f16): Likewise.
+ (vbrsrq_x_n_f32): Likewise.
+ (veorq_x_f16): Likewise.
+ (veorq_x_f32): Likewise.
+ (vornq_x_f16): Likewise.
+ (vornq_x_f32): Likewise.
+ (vorrq_x_f16): Likewise.
+ (vorrq_x_f32): Likewise.
+ (vrev32q_x_f16): Likewise.
+ (vrev64q_x_f16): Likewise.
+ (vrev64q_x_f32): Likewise.
+ (__arm_vddupq_x_n_u8): Define intrinsic.
+ (__arm_vddupq_x_n_u16): Likewise.
+ (__arm_vddupq_x_n_u32): Likewise.
+ (__arm_vddupq_x_wb_u8): Likewise.
+ (__arm_vddupq_x_wb_u16): Likewise.
+ (__arm_vddupq_x_wb_u32): Likewise.
+ (__arm_vdwdupq_x_n_u8): Likewise.
+ (__arm_vdwdupq_x_n_u16): Likewise.
+ (__arm_vdwdupq_x_n_u32): Likewise.
+ (__arm_vdwdupq_x_wb_u8): Likewise.
+ (__arm_vdwdupq_x_wb_u16): Likewise.
+ (__arm_vdwdupq_x_wb_u32): Likewise.
+ (__arm_vidupq_x_n_u8): Likewise.
+ (__arm_vidupq_x_n_u16): Likewise.
+ (__arm_vidupq_x_n_u32): Likewise.
+ (__arm_vidupq_x_wb_u8): Likewise.
+ (__arm_vidupq_x_wb_u16): Likewise.
+ (__arm_vidupq_x_wb_u32): Likewise.
+ (__arm_viwdupq_x_n_u8): Likewise.
+ (__arm_viwdupq_x_n_u16): Likewise.
+ (__arm_viwdupq_x_n_u32): Likewise.
+ (__arm_viwdupq_x_wb_u8): Likewise.
+ (__arm_viwdupq_x_wb_u16): Likewise.
+ (__arm_viwdupq_x_wb_u32): Likewise.
+ (__arm_vdupq_x_n_s8): Likewise.
+ (__arm_vdupq_x_n_s16): Likewise.
+ (__arm_vdupq_x_n_s32): Likewise.
+ (__arm_vdupq_x_n_u8): Likewise.
+ (__arm_vdupq_x_n_u16): Likewise.
+ (__arm_vdupq_x_n_u32): Likewise.
+ (__arm_vminq_x_s8): Likewise.
+ (__arm_vminq_x_s16): Likewise.
+ (__arm_vminq_x_s32): Likewise.
+ (__arm_vminq_x_u8): Likewise.
+ (__arm_vminq_x_u16): Likewise.
+ (__arm_vminq_x_u32): Likewise.
+ (__arm_vmaxq_x_s8): Likewise.
+ (__arm_vmaxq_x_s16): Likewise.
+ (__arm_vmaxq_x_s32): Likewise.
+ (__arm_vmaxq_x_u8): Likewise.
+ (__arm_vmaxq_x_u16): Likewise.
+ (__arm_vmaxq_x_u32): Likewise.
+ (__arm_vabdq_x_s8): Likewise.
+ (__arm_vabdq_x_s16): Likewise.
+ (__arm_vabdq_x_s32): Likewise.
+ (__arm_vabdq_x_u8): Likewise.
+ (__arm_vabdq_x_u16): Likewise.
+ (__arm_vabdq_x_u32): Likewise.
+ (__arm_vabsq_x_s8): Likewise.
+ (__arm_vabsq_x_s16): Likewise.
+ (__arm_vabsq_x_s32): Likewise.
+ (__arm_vaddq_x_s8): Likewise.
+ (__arm_vaddq_x_s16): Likewise.
+ (__arm_vaddq_x_s32): Likewise.
+ (__arm_vaddq_x_n_s8): Likewise.
+ (__arm_vaddq_x_n_s16): Likewise.
+ (__arm_vaddq_x_n_s32): Likewise.
+ (__arm_vaddq_x_u8): Likewise.
+ (__arm_vaddq_x_u16): Likewise.
+ (__arm_vaddq_x_u32): Likewise.
+ (__arm_vaddq_x_n_u8): Likewise.
+ (__arm_vaddq_x_n_u16): Likewise.
+ (__arm_vaddq_x_n_u32): Likewise.
+ (__arm_vclsq_x_s8): Likewise.
+ (__arm_vclsq_x_s16): Likewise.
+ (__arm_vclsq_x_s32): Likewise.
+ (__arm_vclzq_x_s8): Likewise.
+ (__arm_vclzq_x_s16): Likewise.
+ (__arm_vclzq_x_s32): Likewise.
+ (__arm_vclzq_x_u8): Likewise.
+ (__arm_vclzq_x_u16): Likewise.
+ (__arm_vclzq_x_u32): Likewise.
+ (__arm_vnegq_x_s8): Likewise.
+ (__arm_vnegq_x_s16): Likewise.
+ (__arm_vnegq_x_s32): Likewise.
+ (__arm_vmulhq_x_s8): Likewise.
+ (__arm_vmulhq_x_s16): Likewise.
+ (__arm_vmulhq_x_s32): Likewise.
+ (__arm_vmulhq_x_u8): Likewise.
+ (__arm_vmulhq_x_u16): Likewise.
+ (__arm_vmulhq_x_u32): Likewise.
+ (__arm_vmullbq_poly_x_p8): Likewise.
+ (__arm_vmullbq_poly_x_p16): Likewise.
+ (__arm_vmullbq_int_x_s8): Likewise.
+ (__arm_vmullbq_int_x_s16): Likewise.
+ (__arm_vmullbq_int_x_s32): Likewise.
+ (__arm_vmullbq_int_x_u8): Likewise.
+ (__arm_vmullbq_int_x_u16): Likewise.
+ (__arm_vmullbq_int_x_u32): Likewise.
+ (__arm_vmulltq_poly_x_p8): Likewise.
+ (__arm_vmulltq_poly_x_p16): Likewise.
+ (__arm_vmulltq_int_x_s8): Likewise.
+ (__arm_vmulltq_int_x_s16): Likewise.
+ (__arm_vmulltq_int_x_s32): Likewise.
+ (__arm_vmulltq_int_x_u8): Likewise.
+ (__arm_vmulltq_int_x_u16): Likewise.
+ (__arm_vmulltq_int_x_u32): Likewise.
+ (__arm_vmulq_x_s8): Likewise.
+ (__arm_vmulq_x_s16): Likewise.
+ (__arm_vmulq_x_s32): Likewise.
+ (__arm_vmulq_x_n_s8): Likewise.
+ (__arm_vmulq_x_n_s16): Likewise.
+ (__arm_vmulq_x_n_s32): Likewise.
+ (__arm_vmulq_x_u8): Likewise.
+ (__arm_vmulq_x_u16): Likewise.
+ (__arm_vmulq_x_u32): Likewise.
+ (__arm_vmulq_x_n_u8): Likewise.
+ (__arm_vmulq_x_n_u16): Likewise.
+ (__arm_vmulq_x_n_u32): Likewise.
+ (__arm_vsubq_x_s8): Likewise.
+ (__arm_vsubq_x_s16): Likewise.
+ (__arm_vsubq_x_s32): Likewise.
+ (__arm_vsubq_x_n_s8): Likewise.
+ (__arm_vsubq_x_n_s16): Likewise.
+ (__arm_vsubq_x_n_s32): Likewise.
+ (__arm_vsubq_x_u8): Likewise.
+ (__arm_vsubq_x_u16): Likewise.
+ (__arm_vsubq_x_u32): Likewise.
+ (__arm_vsubq_x_n_u8): Likewise.
+ (__arm_vsubq_x_n_u16): Likewise.
+ (__arm_vsubq_x_n_u32): Likewise.
+ (__arm_vcaddq_rot90_x_s8): Likewise.
+ (__arm_vcaddq_rot90_x_s16): Likewise.
+ (__arm_vcaddq_rot90_x_s32): Likewise.
+ (__arm_vcaddq_rot90_x_u8): Likewise.
+ (__arm_vcaddq_rot90_x_u16): Likewise.
+ (__arm_vcaddq_rot90_x_u32): Likewise.
+ (__arm_vcaddq_rot270_x_s8): Likewise.
+ (__arm_vcaddq_rot270_x_s16): Likewise.
+ (__arm_vcaddq_rot270_x_s32): Likewise.
+ (__arm_vcaddq_rot270_x_u8): Likewise.
+ (__arm_vcaddq_rot270_x_u16): Likewise.
+ (__arm_vcaddq_rot270_x_u32): Likewise.
+ (__arm_vhaddq_x_n_s8): Likewise.
+ (__arm_vhaddq_x_n_s16): Likewise.
+ (__arm_vhaddq_x_n_s32): Likewise.
+ (__arm_vhaddq_x_n_u8): Likewise.
+ (__arm_vhaddq_x_n_u16): Likewise.
+ (__arm_vhaddq_x_n_u32): Likewise.
+ (__arm_vhaddq_x_s8): Likewise.
+ (__arm_vhaddq_x_s16): Likewise.
+ (__arm_vhaddq_x_s32): Likewise.
+ (__arm_vhaddq_x_u8): Likewise.
+ (__arm_vhaddq_x_u16): Likewise.
+ (__arm_vhaddq_x_u32): Likewise.
+ (__arm_vhcaddq_rot90_x_s8): Likewise.
+ (__arm_vhcaddq_rot90_x_s16): Likewise.
+ (__arm_vhcaddq_rot90_x_s32): Likewise.
+ (__arm_vhcaddq_rot270_x_s8): Likewise.
+ (__arm_vhcaddq_rot270_x_s16): Likewise.
+ (__arm_vhcaddq_rot270_x_s32): Likewise.
+ (__arm_vhsubq_x_n_s8): Likewise.
+ (__arm_vhsubq_x_n_s16): Likewise.
+ (__arm_vhsubq_x_n_s32): Likewise.
+ (__arm_vhsubq_x_n_u8): Likewise.
+ (__arm_vhsubq_x_n_u16): Likewise.
+ (__arm_vhsubq_x_n_u32): Likewise.
+ (__arm_vhsubq_x_s8): Likewise.
+ (__arm_vhsubq_x_s16): Likewise.
+ (__arm_vhsubq_x_s32): Likewise.
+ (__arm_vhsubq_x_u8): Likewise.
+ (__arm_vhsubq_x_u16): Likewise.
+ (__arm_vhsubq_x_u32): Likewise.
+ (__arm_vrhaddq_x_s8): Likewise.
+ (__arm_vrhaddq_x_s16): Likewise.
+ (__arm_vrhaddq_x_s32): Likewise.
+ (__arm_vrhaddq_x_u8): Likewise.
+ (__arm_vrhaddq_x_u16): Likewise.
+ (__arm_vrhaddq_x_u32): Likewise.
+ (__arm_vrmulhq_x_s8): Likewise.
+ (__arm_vrmulhq_x_s16): Likewise.
+ (__arm_vrmulhq_x_s32): Likewise.
+ (__arm_vrmulhq_x_u8): Likewise.
+ (__arm_vrmulhq_x_u16): Likewise.
+ (__arm_vrmulhq_x_u32): Likewise.
+ (__arm_vandq_x_s8): Likewise.
+ (__arm_vandq_x_s16): Likewise.
+ (__arm_vandq_x_s32): Likewise.
+ (__arm_vandq_x_u8): Likewise.
+ (__arm_vandq_x_u16): Likewise.
+ (__arm_vandq_x_u32): Likewise.
+ (__arm_vbicq_x_s8): Likewise.
+ (__arm_vbicq_x_s16): Likewise.
+ (__arm_vbicq_x_s32): Likewise.
+ (__arm_vbicq_x_u8): Likewise.
+ (__arm_vbicq_x_u16): Likewise.
+ (__arm_vbicq_x_u32): Likewise.
+ (__arm_vbrsrq_x_n_s8): Likewise.
+ (__arm_vbrsrq_x_n_s16): Likewise.
+ (__arm_vbrsrq_x_n_s32): Likewise.
+ (__arm_vbrsrq_x_n_u8): Likewise.
+ (__arm_vbrsrq_x_n_u16): Likewise.
+ (__arm_vbrsrq_x_n_u32): Likewise.
+ (__arm_veorq_x_s8): Likewise.
+ (__arm_veorq_x_s16): Likewise.
+ (__arm_veorq_x_s32): Likewise.
+ (__arm_veorq_x_u8): Likewise.
+ (__arm_veorq_x_u16): Likewise.
+ (__arm_veorq_x_u32): Likewise.
+ (__arm_vmovlbq_x_s8): Likewise.
+ (__arm_vmovlbq_x_s16): Likewise.
+ (__arm_vmovlbq_x_u8): Likewise.
+ (__arm_vmovlbq_x_u16): Likewise.
+ (__arm_vmovltq_x_s8): Likewise.
+ (__arm_vmovltq_x_s16): Likewise.
+ (__arm_vmovltq_x_u8): Likewise.
+ (__arm_vmovltq_x_u16): Likewise.
+ (__arm_vmvnq_x_s8): Likewise.
+ (__arm_vmvnq_x_s16): Likewise.
+ (__arm_vmvnq_x_s32): Likewise.
+ (__arm_vmvnq_x_u8): Likewise.
+ (__arm_vmvnq_x_u16): Likewise.
+ (__arm_vmvnq_x_u32): Likewise.
+ (__arm_vmvnq_x_n_s16): Likewise.
+ (__arm_vmvnq_x_n_s32): Likewise.
+ (__arm_vmvnq_x_n_u16): Likewise.
+ (__arm_vmvnq_x_n_u32): Likewise.
+ (__arm_vornq_x_s8): Likewise.
+ (__arm_vornq_x_s16): Likewise.
+ (__arm_vornq_x_s32): Likewise.
+ (__arm_vornq_x_u8): Likewise.
+ (__arm_vornq_x_u16): Likewise.
+ (__arm_vornq_x_u32): Likewise.
+ (__arm_vorrq_x_s8): Likewise.
+ (__arm_vorrq_x_s16): Likewise.
+ (__arm_vorrq_x_s32): Likewise.
+ (__arm_vorrq_x_u8): Likewise.
+ (__arm_vorrq_x_u16): Likewise.
+ (__arm_vorrq_x_u32): Likewise.
+ (__arm_vrev16q_x_s8): Likewise.
+ (__arm_vrev16q_x_u8): Likewise.
+ (__arm_vrev32q_x_s8): Likewise.
+ (__arm_vrev32q_x_s16): Likewise.
+ (__arm_vrev32q_x_u8): Likewise.
+ (__arm_vrev32q_x_u16): Likewise.
+ (__arm_vrev64q_x_s8): Likewise.
+ (__arm_vrev64q_x_s16): Likewise.
+ (__arm_vrev64q_x_s32): Likewise.
+ (__arm_vrev64q_x_u8): Likewise.
+ (__arm_vrev64q_x_u16): Likewise.
+ (__arm_vrev64q_x_u32): Likewise.
+ (__arm_vrshlq_x_s8): Likewise.
+ (__arm_vrshlq_x_s16): Likewise.
+ (__arm_vrshlq_x_s32): Likewise.
+ (__arm_vrshlq_x_u8): Likewise.
+ (__arm_vrshlq_x_u16): Likewise.
+ (__arm_vrshlq_x_u32): Likewise.
+ (__arm_vshllbq_x_n_s8): Likewise.
+ (__arm_vshllbq_x_n_s16): Likewise.
+ (__arm_vshllbq_x_n_u8): Likewise.
+ (__arm_vshllbq_x_n_u16): Likewise.
+ (__arm_vshlltq_x_n_s8): Likewise.
+ (__arm_vshlltq_x_n_s16): Likewise.
+ (__arm_vshlltq_x_n_u8): Likewise.
+ (__arm_vshlltq_x_n_u16): Likewise.
+ (__arm_vshlq_x_s8): Likewise.
+ (__arm_vshlq_x_s16): Likewise.
+ (__arm_vshlq_x_s32): Likewise.
+ (__arm_vshlq_x_u8): Likewise.
+ (__arm_vshlq_x_u16): Likewise.
+ (__arm_vshlq_x_u32): Likewise.
+ (__arm_vshlq_x_n_s8): Likewise.
+ (__arm_vshlq_x_n_s16): Likewise.
+ (__arm_vshlq_x_n_s32): Likewise.
+ (__arm_vshlq_x_n_u8): Likewise.
+ (__arm_vshlq_x_n_u16): Likewise.
+ (__arm_vshlq_x_n_u32): Likewise.
+ (__arm_vrshrq_x_n_s8): Likewise.
+ (__arm_vrshrq_x_n_s16): Likewise.
+ (__arm_vrshrq_x_n_s32): Likewise.
+ (__arm_vrshrq_x_n_u8): Likewise.
+ (__arm_vrshrq_x_n_u16): Likewise.
+ (__arm_vrshrq_x_n_u32): Likewise.
+ (__arm_vshrq_x_n_s8): Likewise.
+ (__arm_vshrq_x_n_s16): Likewise.
+ (__arm_vshrq_x_n_s32): Likewise.
+ (__arm_vshrq_x_n_u8): Likewise.
+ (__arm_vshrq_x_n_u16): Likewise.
+ (__arm_vshrq_x_n_u32): Likewise.
+ (__arm_vdupq_x_n_f16): Likewise.
+ (__arm_vdupq_x_n_f32): Likewise.
+ (__arm_vminnmq_x_f16): Likewise.
+ (__arm_vminnmq_x_f32): Likewise.
+ (__arm_vmaxnmq_x_f16): Likewise.
+ (__arm_vmaxnmq_x_f32): Likewise.
+ (__arm_vabdq_x_f16): Likewise.
+ (__arm_vabdq_x_f32): Likewise.
+ (__arm_vabsq_x_f16): Likewise.
+ (__arm_vabsq_x_f32): Likewise.
+ (__arm_vaddq_x_f16): Likewise.
+ (__arm_vaddq_x_f32): Likewise.
+ (__arm_vaddq_x_n_f16): Likewise.
+ (__arm_vaddq_x_n_f32): Likewise.
+ (__arm_vnegq_x_f16): Likewise.
+ (__arm_vnegq_x_f32): Likewise.
+ (__arm_vmulq_x_f16): Likewise.
+ (__arm_vmulq_x_f32): Likewise.
+ (__arm_vmulq_x_n_f16): Likewise.
+ (__arm_vmulq_x_n_f32): Likewise.
+ (__arm_vsubq_x_f16): Likewise.
+ (__arm_vsubq_x_f32): Likewise.
+ (__arm_vsubq_x_n_f16): Likewise.
+ (__arm_vsubq_x_n_f32): Likewise.
+ (__arm_vcaddq_rot90_x_f16): Likewise.
+ (__arm_vcaddq_rot90_x_f32): Likewise.
+ (__arm_vcaddq_rot270_x_f16): Likewise.
+ (__arm_vcaddq_rot270_x_f32): Likewise.
+ (__arm_vcmulq_x_f16): Likewise.
+ (__arm_vcmulq_x_f32): Likewise.
+ (__arm_vcmulq_rot90_x_f16): Likewise.
+ (__arm_vcmulq_rot90_x_f32): Likewise.
+ (__arm_vcmulq_rot180_x_f16): Likewise.
+ (__arm_vcmulq_rot180_x_f32): Likewise.
+ (__arm_vcmulq_rot270_x_f16): Likewise.
+ (__arm_vcmulq_rot270_x_f32): Likewise.
+ (__arm_vcvtaq_x_s16_f16): Likewise.
+ (__arm_vcvtaq_x_s32_f32): Likewise.
+ (__arm_vcvtaq_x_u16_f16): Likewise.
+ (__arm_vcvtaq_x_u32_f32): Likewise.
+ (__arm_vcvtnq_x_s16_f16): Likewise.
+ (__arm_vcvtnq_x_s32_f32): Likewise.
+ (__arm_vcvtnq_x_u16_f16): Likewise.
+ (__arm_vcvtnq_x_u32_f32): Likewise.
+ (__arm_vcvtpq_x_s16_f16): Likewise.
+ (__arm_vcvtpq_x_s32_f32): Likewise.
+ (__arm_vcvtpq_x_u16_f16): Likewise.
+ (__arm_vcvtpq_x_u32_f32): Likewise.
+ (__arm_vcvtmq_x_s16_f16): Likewise.
+ (__arm_vcvtmq_x_s32_f32): Likewise.
+ (__arm_vcvtmq_x_u16_f16): Likewise.
+ (__arm_vcvtmq_x_u32_f32): Likewise.
+ (__arm_vcvtbq_x_f32_f16): Likewise.
+ (__arm_vcvttq_x_f32_f16): Likewise.
+ (__arm_vcvtq_x_f16_u16): Likewise.
+ (__arm_vcvtq_x_f16_s16): Likewise.
+ (__arm_vcvtq_x_f32_s32): Likewise.
+ (__arm_vcvtq_x_f32_u32): Likewise.
+ (__arm_vcvtq_x_n_f16_s16): Likewise.
+ (__arm_vcvtq_x_n_f16_u16): Likewise.
+ (__arm_vcvtq_x_n_f32_s32): Likewise.
+ (__arm_vcvtq_x_n_f32_u32): Likewise.
+ (__arm_vcvtq_x_s16_f16): Likewise.
+ (__arm_vcvtq_x_s32_f32): Likewise.
+ (__arm_vcvtq_x_u16_f16): Likewise.
+ (__arm_vcvtq_x_u32_f32): Likewise.
+ (__arm_vcvtq_x_n_s16_f16): Likewise.
+ (__arm_vcvtq_x_n_s32_f32): Likewise.
+ (__arm_vcvtq_x_n_u16_f16): Likewise.
+ (__arm_vcvtq_x_n_u32_f32): Likewise.
+ (__arm_vrndq_x_f16): Likewise.
+ (__arm_vrndq_x_f32): Likewise.
+ (__arm_vrndnq_x_f16): Likewise.
+ (__arm_vrndnq_x_f32): Likewise.
+ (__arm_vrndmq_x_f16): Likewise.
+ (__arm_vrndmq_x_f32): Likewise.
+ (__arm_vrndpq_x_f16): Likewise.
+ (__arm_vrndpq_x_f32): Likewise.
+ (__arm_vrndaq_x_f16): Likewise.
+ (__arm_vrndaq_x_f32): Likewise.
+ (__arm_vrndxq_x_f16): Likewise.
+ (__arm_vrndxq_x_f32): Likewise.
+ (__arm_vandq_x_f16): Likewise.
+ (__arm_vandq_x_f32): Likewise.
+ (__arm_vbicq_x_f16): Likewise.
+ (__arm_vbicq_x_f32): Likewise.
+ (__arm_vbrsrq_x_n_f16): Likewise.
+ (__arm_vbrsrq_x_n_f32): Likewise.
+ (__arm_veorq_x_f16): Likewise.
+ (__arm_veorq_x_f32): Likewise.
+ (__arm_vornq_x_f16): Likewise.
+ (__arm_vornq_x_f32): Likewise.
+ (__arm_vorrq_x_f16): Likewise.
+ (__arm_vorrq_x_f32): Likewise.
+ (__arm_vrev32q_x_f16): Likewise.
+ (__arm_vrev64q_x_f16): Likewise.
+ (__arm_vrev64q_x_f32): Likewise.
+ (vabdq_x): Define polymorphic variant.
+ (vabsq_x): Likewise.
+ (vaddq_x): Likewise.
+ (vandq_x): Likewise.
+ (vbicq_x): Likewise.
+ (vbrsrq_x): Likewise.
+ (vcaddq_rot270_x): Likewise.
+ (vcaddq_rot90_x): Likewise.
+ (vcmulq_rot180_x): Likewise.
+ (vcmulq_rot270_x): Likewise.
+ (vcmulq_x): Likewise.
+ (vcvtq_x): Likewise.
+ (vcvtq_x_n): Likewise.
+ (vcvtnq_x): Likewise.
+ (veorq_x): Likewise.
+ (vmaxnmq_x): Likewise.
+ (vminnmq_x): Likewise.
+ (vmulq_x): Likewise.
+ (vnegq_x): Likewise.
+ (vornq_x): Likewise.
+ (vorrq_x): Likewise.
+ (vrev32q_x): Likewise.
+ (vrev64q_x): Likewise.
+ (vrndaq_x): Likewise.
+ (vrndmq_x): Likewise.
+ (vrndnq_x): Likewise.
+ (vrndpq_x): Likewise.
+ (vrndq_x): Likewise.
+ (vrndxq_x): Likewise.
+ (vsubq_x): Likewise.
+ (vcmulq_rot90_x): Likewise.
+ (vadciq): Likewise.
+ (vclsq_x): Likewise.
+ (vclzq_x): Likewise.
+ (vhaddq_x): Likewise.
+ (vhcaddq_rot270_x): Likewise.
+ (vhcaddq_rot90_x): Likewise.
+ (vhsubq_x): Likewise.
+ (vmaxq_x): Likewise.
+ (vminq_x): Likewise.
+ (vmovlbq_x): Likewise.
+ (vmovltq_x): Likewise.
+ (vmulhq_x): Likewise.
+ (vmullbq_int_x): Likewise.
+ (vmullbq_poly_x): Likewise.
+ (vmulltq_int_x): Likewise.
+ (vmulltq_poly_x): Likewise.
+ (vmvnq_x): Likewise.
+ (vrev16q_x): Likewise.
+ (vrhaddq_x): Likewise.
+ (vrmulhq_x): Likewise.
+ (vrshlq_x): Likewise.
+ (vrshrq_x): Likewise.
+ (vshllbq_x): Likewise.
+ (vshlltq_x): Likewise.
+ (vshlq_x_n): Likewise.
+ (vshlq_x): Likewise.
+ (vdwdupq_x_u8): Likewise.
+ (vdwdupq_x_u16): Likewise.
+ (vdwdupq_x_u32): Likewise.
+ (viwdupq_x_u8): Likewise.
+ (viwdupq_x_u16): Likewise.
+ (viwdupq_x_u32): Likewise.
+ (vidupq_x_u8): Likewise.
+ (vddupq_x_u8): Likewise.
+ (vidupq_x_u16): Likewise.
+ (vddupq_x_u16): Likewise.
+ (vidupq_x_u32): Likewise.
+ (vddupq_x_u32): Likewise.
+ (vshrq_x): Likewise.
+
2020-03-20 Richard Biener <rguenther@suse.de>
* tree-vect-slp.c (vect_analyze_slp_instance): Push the stmts
diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
index 969908b..77df7c7 100644
--- a/gcc/config/arm/arm_mve.h
+++ b/gcc/config/arm/arm_mve.h
@@ -2074,6 +2074,382 @@ typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
#define vstrwq_scatter_base_wb_s32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_s32(__addr, __offset, __value)
#define vstrwq_scatter_base_wb_u32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_u32(__addr, __offset, __value)
#define vstrwq_scatter_base_wb_f32(__addr, __offset, __value) __arm_vstrwq_scatter_base_wb_f32(__addr, __offset, __value)
+#define vddupq_x_n_u8(__a, __imm, __p) __arm_vddupq_x_n_u8(__a, __imm, __p)
+#define vddupq_x_n_u16(__a, __imm, __p) __arm_vddupq_x_n_u16(__a, __imm, __p)
+#define vddupq_x_n_u32(__a, __imm, __p) __arm_vddupq_x_n_u32(__a, __imm, __p)
+#define vddupq_x_wb_u8(__a, __imm, __p) __arm_vddupq_x_wb_u8(__a, __imm, __p)
+#define vddupq_x_wb_u16(__a, __imm, __p) __arm_vddupq_x_wb_u16(__a, __imm, __p)
+#define vddupq_x_wb_u32(__a, __imm, __p) __arm_vddupq_x_wb_u32(__a, __imm, __p)
+#define vdwdupq_x_n_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u8(__a, __b, __imm, __p)
+#define vdwdupq_x_n_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u16(__a, __b, __imm, __p)
+#define vdwdupq_x_n_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_n_u32(__a, __b, __imm, __p)
+#define vdwdupq_x_wb_u8(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u8(__a, __b, __imm, __p)
+#define vdwdupq_x_wb_u16(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u16(__a, __b, __imm, __p)
+#define vdwdupq_x_wb_u32(__a, __b, __imm, __p) __arm_vdwdupq_x_wb_u32(__a, __b, __imm, __p)
+#define vidupq_x_n_u8(__a, __imm, __p) __arm_vidupq_x_n_u8(__a, __imm, __p)
+#define vidupq_x_n_u16(__a, __imm, __p) __arm_vidupq_x_n_u16(__a, __imm, __p)
+#define vidupq_x_n_u32(__a, __imm, __p) __arm_vidupq_x_n_u32(__a, __imm, __p)
+#define vidupq_x_wb_u8(__a, __imm, __p) __arm_vidupq_x_wb_u8(__a, __imm, __p)
+#define vidupq_x_wb_u16(__a, __imm, __p) __arm_vidupq_x_wb_u16(__a, __imm, __p)
+#define vidupq_x_wb_u32(__a, __imm, __p) __arm_vidupq_x_wb_u32(__a, __imm, __p)
+#define viwdupq_x_n_u8(__a, __b, __imm, __p) __arm_viwdupq_x_n_u8(__a, __b, __imm, __p)
+#define viwdupq_x_n_u16(__a, __b, __imm, __p) __arm_viwdupq_x_n_u16(__a, __b, __imm, __p)
+#define viwdupq_x_n_u32(__a, __b, __imm, __p) __arm_viwdupq_x_n_u32(__a, __b, __imm, __p)
+#define viwdupq_x_wb_u8(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u8(__a, __b, __imm, __p)
+#define viwdupq_x_wb_u16(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u16(__a, __b, __imm, __p)
+#define viwdupq_x_wb_u32(__a, __b, __imm, __p) __arm_viwdupq_x_wb_u32(__a, __b, __imm, __p)
+#define vdupq_x_n_s8(__a, __p) __arm_vdupq_x_n_s8(__a, __p)
+#define vdupq_x_n_s16(__a, __p) __arm_vdupq_x_n_s16(__a, __p)
+#define vdupq_x_n_s32(__a, __p) __arm_vdupq_x_n_s32(__a, __p)
+#define vdupq_x_n_u8(__a, __p) __arm_vdupq_x_n_u8(__a, __p)
+#define vdupq_x_n_u16(__a, __p) __arm_vdupq_x_n_u16(__a, __p)
+#define vdupq_x_n_u32(__a, __p) __arm_vdupq_x_n_u32(__a, __p)
+#define vminq_x_s8(__a, __b, __p) __arm_vminq_x_s8(__a, __b, __p)
+#define vminq_x_s16(__a, __b, __p) __arm_vminq_x_s16(__a, __b, __p)
+#define vminq_x_s32(__a, __b, __p) __arm_vminq_x_s32(__a, __b, __p)
+#define vminq_x_u8(__a, __b, __p) __arm_vminq_x_u8(__a, __b, __p)
+#define vminq_x_u16(__a, __b, __p) __arm_vminq_x_u16(__a, __b, __p)
+#define vminq_x_u32(__a, __b, __p) __arm_vminq_x_u32(__a, __b, __p)
+#define vmaxq_x_s8(__a, __b, __p) __arm_vmaxq_x_s8(__a, __b, __p)
+#define vmaxq_x_s16(__a, __b, __p) __arm_vmaxq_x_s16(__a, __b, __p)
+#define vmaxq_x_s32(__a, __b, __p) __arm_vmaxq_x_s32(__a, __b, __p)
+#define vmaxq_x_u8(__a, __b, __p) __arm_vmaxq_x_u8(__a, __b, __p)
+#define vmaxq_x_u16(__a, __b, __p) __arm_vmaxq_x_u16(__a, __b, __p)
+#define vmaxq_x_u32(__a, __b, __p) __arm_vmaxq_x_u32(__a, __b, __p)
+#define vabdq_x_s8(__a, __b, __p) __arm_vabdq_x_s8(__a, __b, __p)
+#define vabdq_x_s16(__a, __b, __p) __arm_vabdq_x_s16(__a, __b, __p)
+#define vabdq_x_s32(__a, __b, __p) __arm_vabdq_x_s32(__a, __b, __p)
+#define vabdq_x_u8(__a, __b, __p) __arm_vabdq_x_u8(__a, __b, __p)
+#define vabdq_x_u16(__a, __b, __p) __arm_vabdq_x_u16(__a, __b, __p)
+#define vabdq_x_u32(__a, __b, __p) __arm_vabdq_x_u32(__a, __b, __p)
+#define vabsq_x_s8(__a, __p) __arm_vabsq_x_s8(__a, __p)
+#define vabsq_x_s16(__a, __p) __arm_vabsq_x_s16(__a, __p)
+#define vabsq_x_s32(__a, __p) __arm_vabsq_x_s32(__a, __p)
+#define vaddq_x_s8(__a, __b, __p) __arm_vaddq_x_s8(__a, __b, __p)
+#define vaddq_x_s16(__a, __b, __p) __arm_vaddq_x_s16(__a, __b, __p)
+#define vaddq_x_s32(__a, __b, __p) __arm_vaddq_x_s32(__a, __b, __p)
+#define vaddq_x_n_s8(__a, __b, __p) __arm_vaddq_x_n_s8(__a, __b, __p)
+#define vaddq_x_n_s16(__a, __b, __p) __arm_vaddq_x_n_s16(__a, __b, __p)
+#define vaddq_x_n_s32(__a, __b, __p) __arm_vaddq_x_n_s32(__a, __b, __p)
+#define vaddq_x_u8(__a, __b, __p) __arm_vaddq_x_u8(__a, __b, __p)
+#define vaddq_x_u16(__a, __b, __p) __arm_vaddq_x_u16(__a, __b, __p)
+#define vaddq_x_u32(__a, __b, __p) __arm_vaddq_x_u32(__a, __b, __p)
+#define vaddq_x_n_u8(__a, __b, __p) __arm_vaddq_x_n_u8(__a, __b, __p)
+#define vaddq_x_n_u16(__a, __b, __p) __arm_vaddq_x_n_u16(__a, __b, __p)
+#define vaddq_x_n_u32(__a, __b, __p) __arm_vaddq_x_n_u32(__a, __b, __p)
+#define vclsq_x_s8(__a, __p) __arm_vclsq_x_s8(__a, __p)
+#define vclsq_x_s16(__a, __p) __arm_vclsq_x_s16(__a, __p)
+#define vclsq_x_s32(__a, __p) __arm_vclsq_x_s32(__a, __p)
+#define vclzq_x_s8(__a, __p) __arm_vclzq_x_s8(__a, __p)
+#define vclzq_x_s16(__a, __p) __arm_vclzq_x_s16(__a, __p)
+#define vclzq_x_s32(__a, __p) __arm_vclzq_x_s32(__a, __p)
+#define vclzq_x_u8(__a, __p) __arm_vclzq_x_u8(__a, __p)
+#define vclzq_x_u16(__a, __p) __arm_vclzq_x_u16(__a, __p)
+#define vclzq_x_u32(__a, __p) __arm_vclzq_x_u32(__a, __p)
+#define vnegq_x_s8(__a, __p) __arm_vnegq_x_s8(__a, __p)
+#define vnegq_x_s16(__a, __p) __arm_vnegq_x_s16(__a, __p)
+#define vnegq_x_s32(__a, __p) __arm_vnegq_x_s32(__a, __p)
+#define vmulhq_x_s8(__a, __b, __p) __arm_vmulhq_x_s8(__a, __b, __p)
+#define vmulhq_x_s16(__a, __b, __p) __arm_vmulhq_x_s16(__a, __b, __p)
+#define vmulhq_x_s32(__a, __b, __p) __arm_vmulhq_x_s32(__a, __b, __p)
+#define vmulhq_x_u8(__a, __b, __p) __arm_vmulhq_x_u8(__a, __b, __p)
+#define vmulhq_x_u16(__a, __b, __p) __arm_vmulhq_x_u16(__a, __b, __p)
+#define vmulhq_x_u32(__a, __b, __p) __arm_vmulhq_x_u32(__a, __b, __p)
+#define vmullbq_poly_x_p8(__a, __b, __p) __arm_vmullbq_poly_x_p8(__a, __b, __p)
+#define vmullbq_poly_x_p16(__a, __b, __p) __arm_vmullbq_poly_x_p16(__a, __b, __p)
+#define vmullbq_int_x_s8(__a, __b, __p) __arm_vmullbq_int_x_s8(__a, __b, __p)
+#define vmullbq_int_x_s16(__a, __b, __p) __arm_vmullbq_int_x_s16(__a, __b, __p)
+#define vmullbq_int_x_s32(__a, __b, __p) __arm_vmullbq_int_x_s32(__a, __b, __p)
+#define vmullbq_int_x_u8(__a, __b, __p) __arm_vmullbq_int_x_u8(__a, __b, __p)
+#define vmullbq_int_x_u16(__a, __b, __p) __arm_vmullbq_int_x_u16(__a, __b, __p)
+#define vmullbq_int_x_u32(__a, __b, __p) __arm_vmullbq_int_x_u32(__a, __b, __p)
+#define vmulltq_poly_x_p8(__a, __b, __p) __arm_vmulltq_poly_x_p8(__a, __b, __p)
+#define vmulltq_poly_x_p16(__a, __b, __p) __arm_vmulltq_poly_x_p16(__a, __b, __p)
+#define vmulltq_int_x_s8(__a, __b, __p) __arm_vmulltq_int_x_s8(__a, __b, __p)
+#define vmulltq_int_x_s16(__a, __b, __p) __arm_vmulltq_int_x_s16(__a, __b, __p)
+#define vmulltq_int_x_s32(__a, __b, __p) __arm_vmulltq_int_x_s32(__a, __b, __p)
+#define vmulltq_int_x_u8(__a, __b, __p) __arm_vmulltq_int_x_u8(__a, __b, __p)
+#define vmulltq_int_x_u16(__a, __b, __p) __arm_vmulltq_int_x_u16(__a, __b, __p)
+#define vmulltq_int_x_u32(__a, __b, __p) __arm_vmulltq_int_x_u32(__a, __b, __p)
+#define vmulq_x_s8(__a, __b, __p) __arm_vmulq_x_s8(__a, __b, __p)
+#define vmulq_x_s16(__a, __b, __p) __arm_vmulq_x_s16(__a, __b, __p)
+#define vmulq_x_s32(__a, __b, __p) __arm_vmulq_x_s32(__a, __b, __p)
+#define vmulq_x_n_s8(__a, __b, __p) __arm_vmulq_x_n_s8(__a, __b, __p)
+#define vmulq_x_n_s16(__a, __b, __p) __arm_vmulq_x_n_s16(__a, __b, __p)
+#define vmulq_x_n_s32(__a, __b, __p) __arm_vmulq_x_n_s32(__a, __b, __p)
+#define vmulq_x_u8(__a, __b, __p) __arm_vmulq_x_u8(__a, __b, __p)
+#define vmulq_x_u16(__a, __b, __p) __arm_vmulq_x_u16(__a, __b, __p)
+#define vmulq_x_u32(__a, __b, __p) __arm_vmulq_x_u32(__a, __b, __p)
+#define vmulq_x_n_u8(__a, __b, __p) __arm_vmulq_x_n_u8(__a, __b, __p)
+#define vmulq_x_n_u16(__a, __b, __p) __arm_vmulq_x_n_u16(__a, __b, __p)
+#define vmulq_x_n_u32(__a, __b, __p) __arm_vmulq_x_n_u32(__a, __b, __p)
+#define vsubq_x_s8(__a, __b, __p) __arm_vsubq_x_s8(__a, __b, __p)
+#define vsubq_x_s16(__a, __b, __p) __arm_vsubq_x_s16(__a, __b, __p)
+#define vsubq_x_s32(__a, __b, __p) __arm_vsubq_x_s32(__a, __b, __p)
+#define vsubq_x_n_s8(__a, __b, __p) __arm_vsubq_x_n_s8(__a, __b, __p)
+#define vsubq_x_n_s16(__a, __b, __p) __arm_vsubq_x_n_s16(__a, __b, __p)
+#define vsubq_x_n_s32(__a, __b, __p) __arm_vsubq_x_n_s32(__a, __b, __p)
+#define vsubq_x_u8(__a, __b, __p) __arm_vsubq_x_u8(__a, __b, __p)
+#define vsubq_x_u16(__a, __b, __p) __arm_vsubq_x_u16(__a, __b, __p)
+#define vsubq_x_u32(__a, __b, __p) __arm_vsubq_x_u32(__a, __b, __p)
+#define vsubq_x_n_u8(__a, __b, __p) __arm_vsubq_x_n_u8(__a, __b, __p)
+#define vsubq_x_n_u16(__a, __b, __p) __arm_vsubq_x_n_u16(__a, __b, __p)
+#define vsubq_x_n_u32(__a, __b, __p) __arm_vsubq_x_n_u32(__a, __b, __p)
+#define vcaddq_rot90_x_s8(__a, __b, __p) __arm_vcaddq_rot90_x_s8(__a, __b, __p)
+#define vcaddq_rot90_x_s16(__a, __b, __p) __arm_vcaddq_rot90_x_s16(__a, __b, __p)
+#define vcaddq_rot90_x_s32(__a, __b, __p) __arm_vcaddq_rot90_x_s32(__a, __b, __p)
+#define vcaddq_rot90_x_u8(__a, __b, __p) __arm_vcaddq_rot90_x_u8(__a, __b, __p)
+#define vcaddq_rot90_x_u16(__a, __b, __p) __arm_vcaddq_rot90_x_u16(__a, __b, __p)
+#define vcaddq_rot90_x_u32(__a, __b, __p) __arm_vcaddq_rot90_x_u32(__a, __b, __p)
+#define vcaddq_rot270_x_s8(__a, __b, __p) __arm_vcaddq_rot270_x_s8(__a, __b, __p)
+#define vcaddq_rot270_x_s16(__a, __b, __p) __arm_vcaddq_rot270_x_s16(__a, __b, __p)
+#define vcaddq_rot270_x_s32(__a, __b, __p) __arm_vcaddq_rot270_x_s32(__a, __b, __p)
+#define vcaddq_rot270_x_u8(__a, __b, __p) __arm_vcaddq_rot270_x_u8(__a, __b, __p)
+#define vcaddq_rot270_x_u16(__a, __b, __p) __arm_vcaddq_rot270_x_u16(__a, __b, __p)
+#define vcaddq_rot270_x_u32(__a, __b, __p) __arm_vcaddq_rot270_x_u32(__a, __b, __p)
+#define vhaddq_x_n_s8(__a, __b, __p) __arm_vhaddq_x_n_s8(__a, __b, __p)
+#define vhaddq_x_n_s16(__a, __b, __p) __arm_vhaddq_x_n_s16(__a, __b, __p)
+#define vhaddq_x_n_s32(__a, __b, __p) __arm_vhaddq_x_n_s32(__a, __b, __p)
+#define vhaddq_x_n_u8(__a, __b, __p) __arm_vhaddq_x_n_u8(__a, __b, __p)
+#define vhaddq_x_n_u16(__a, __b, __p) __arm_vhaddq_x_n_u16(__a, __b, __p)
+#define vhaddq_x_n_u32(__a, __b, __p) __arm_vhaddq_x_n_u32(__a, __b, __p)
+#define vhaddq_x_s8(__a, __b, __p) __arm_vhaddq_x_s8(__a, __b, __p)
+#define vhaddq_x_s16(__a, __b, __p) __arm_vhaddq_x_s16(__a, __b, __p)
+#define vhaddq_x_s32(__a, __b, __p) __arm_vhaddq_x_s32(__a, __b, __p)
+#define vhaddq_x_u8(__a, __b, __p) __arm_vhaddq_x_u8(__a, __b, __p)
+#define vhaddq_x_u16(__a, __b, __p) __arm_vhaddq_x_u16(__a, __b, __p)
+#define vhaddq_x_u32(__a, __b, __p) __arm_vhaddq_x_u32(__a, __b, __p)
+#define vhcaddq_rot90_x_s8(__a, __b, __p) __arm_vhcaddq_rot90_x_s8(__a, __b, __p)
+#define vhcaddq_rot90_x_s16(__a, __b, __p) __arm_vhcaddq_rot90_x_s16(__a, __b, __p)
+#define vhcaddq_rot90_x_s32(__a, __b, __p) __arm_vhcaddq_rot90_x_s32(__a, __b, __p)
+#define vhcaddq_rot270_x_s8(__a, __b, __p) __arm_vhcaddq_rot270_x_s8(__a, __b, __p)
+#define vhcaddq_rot270_x_s16(__a, __b, __p) __arm_vhcaddq_rot270_x_s16(__a, __b, __p)
+#define vhcaddq_rot270_x_s32(__a, __b, __p) __arm_vhcaddq_rot270_x_s32(__a, __b, __p)
+#define vhsubq_x_n_s8(__a, __b, __p) __arm_vhsubq_x_n_s8(__a, __b, __p)
+#define vhsubq_x_n_s16(__a, __b, __p) __arm_vhsubq_x_n_s16(__a, __b, __p)
+#define vhsubq_x_n_s32(__a, __b, __p) __arm_vhsubq_x_n_s32(__a, __b, __p)
+#define vhsubq_x_n_u8(__a, __b, __p) __arm_vhsubq_x_n_u8(__a, __b, __p)
+#define vhsubq_x_n_u16(__a, __b, __p) __arm_vhsubq_x_n_u16(__a, __b, __p)
+#define vhsubq_x_n_u32(__a, __b, __p) __arm_vhsubq_x_n_u32(__a, __b, __p)
+#define vhsubq_x_s8(__a, __b, __p) __arm_vhsubq_x_s8(__a, __b, __p)
+#define vhsubq_x_s16(__a, __b, __p) __arm_vhsubq_x_s16(__a, __b, __p)
+#define vhsubq_x_s32(__a, __b, __p) __arm_vhsubq_x_s32(__a, __b, __p)
+#define vhsubq_x_u8(__a, __b, __p) __arm_vhsubq_x_u8(__a, __b, __p)
+#define vhsubq_x_u16(__a, __b, __p) __arm_vhsubq_x_u16(__a, __b, __p)
+#define vhsubq_x_u32(__a, __b, __p) __arm_vhsubq_x_u32(__a, __b, __p)
+#define vrhaddq_x_s8(__a, __b, __p) __arm_vrhaddq_x_s8(__a, __b, __p)
+#define vrhaddq_x_s16(__a, __b, __p) __arm_vrhaddq_x_s16(__a, __b, __p)
+#define vrhaddq_x_s32(__a, __b, __p) __arm_vrhaddq_x_s32(__a, __b, __p)
+#define vrhaddq_x_u8(__a, __b, __p) __arm_vrhaddq_x_u8(__a, __b, __p)
+#define vrhaddq_x_u16(__a, __b, __p) __arm_vrhaddq_x_u16(__a, __b, __p)
+#define vrhaddq_x_u32(__a, __b, __p) __arm_vrhaddq_x_u32(__a, __b, __p)
+#define vrmulhq_x_s8(__a, __b, __p) __arm_vrmulhq_x_s8(__a, __b, __p)
+#define vrmulhq_x_s16(__a, __b, __p) __arm_vrmulhq_x_s16(__a, __b, __p)
+#define vrmulhq_x_s32(__a, __b, __p) __arm_vrmulhq_x_s32(__a, __b, __p)
+#define vrmulhq_x_u8(__a, __b, __p) __arm_vrmulhq_x_u8(__a, __b, __p)
+#define vrmulhq_x_u16(__a, __b, __p) __arm_vrmulhq_x_u16(__a, __b, __p)
+#define vrmulhq_x_u32(__a, __b, __p) __arm_vrmulhq_x_u32(__a, __b, __p)
+#define vandq_x_s8(__a, __b, __p) __arm_vandq_x_s8(__a, __b, __p)
+#define vandq_x_s16(__a, __b, __p) __arm_vandq_x_s16(__a, __b, __p)
+#define vandq_x_s32(__a, __b, __p) __arm_vandq_x_s32(__a, __b, __p)
+#define vandq_x_u8(__a, __b, __p) __arm_vandq_x_u8(__a, __b, __p)
+#define vandq_x_u16(__a, __b, __p) __arm_vandq_x_u16(__a, __b, __p)
+#define vandq_x_u32(__a, __b, __p) __arm_vandq_x_u32(__a, __b, __p)
+#define vbicq_x_s8(__a, __b, __p) __arm_vbicq_x_s8(__a, __b, __p)
+#define vbicq_x_s16(__a, __b, __p) __arm_vbicq_x_s16(__a, __b, __p)
+#define vbicq_x_s32(__a, __b, __p) __arm_vbicq_x_s32(__a, __b, __p)
+#define vbicq_x_u8(__a, __b, __p) __arm_vbicq_x_u8(__a, __b, __p)
+#define vbicq_x_u16(__a, __b, __p) __arm_vbicq_x_u16(__a, __b, __p)
+#define vbicq_x_u32(__a, __b, __p) __arm_vbicq_x_u32(__a, __b, __p)
+#define vbrsrq_x_n_s8(__a, __b, __p) __arm_vbrsrq_x_n_s8(__a, __b, __p)
+#define vbrsrq_x_n_s16(__a, __b, __p) __arm_vbrsrq_x_n_s16(__a, __b, __p)
+#define vbrsrq_x_n_s32(__a, __b, __p) __arm_vbrsrq_x_n_s32(__a, __b, __p)
+#define vbrsrq_x_n_u8(__a, __b, __p) __arm_vbrsrq_x_n_u8(__a, __b, __p)
+#define vbrsrq_x_n_u16(__a, __b, __p) __arm_vbrsrq_x_n_u16(__a, __b, __p)
+#define vbrsrq_x_n_u32(__a, __b, __p) __arm_vbrsrq_x_n_u32(__a, __b, __p)
+#define veorq_x_s8(__a, __b, __p) __arm_veorq_x_s8(__a, __b, __p)
+#define veorq_x_s16(__a, __b, __p) __arm_veorq_x_s16(__a, __b, __p)
+#define veorq_x_s32(__a, __b, __p) __arm_veorq_x_s32(__a, __b, __p)
+#define veorq_x_u8(__a, __b, __p) __arm_veorq_x_u8(__a, __b, __p)
+#define veorq_x_u16(__a, __b, __p) __arm_veorq_x_u16(__a, __b, __p)
+#define veorq_x_u32(__a, __b, __p) __arm_veorq_x_u32(__a, __b, __p)
+#define vmovlbq_x_s8(__a, __p) __arm_vmovlbq_x_s8(__a, __p)
+#define vmovlbq_x_s16(__a, __p) __arm_vmovlbq_x_s16(__a, __p)
+#define vmovlbq_x_u8(__a, __p) __arm_vmovlbq_x_u8(__a, __p)
+#define vmovlbq_x_u16(__a, __p) __arm_vmovlbq_x_u16(__a, __p)
+#define vmovltq_x_s8(__a, __p) __arm_vmovltq_x_s8(__a, __p)
+#define vmovltq_x_s16(__a, __p) __arm_vmovltq_x_s16(__a, __p)
+#define vmovltq_x_u8(__a, __p) __arm_vmovltq_x_u8(__a, __p)
+#define vmovltq_x_u16(__a, __p) __arm_vmovltq_x_u16(__a, __p)
+#define vmvnq_x_s8(__a, __p) __arm_vmvnq_x_s8(__a, __p)
+#define vmvnq_x_s16(__a, __p) __arm_vmvnq_x_s16(__a, __p)
+#define vmvnq_x_s32(__a, __p) __arm_vmvnq_x_s32(__a, __p)
+#define vmvnq_x_u8(__a, __p) __arm_vmvnq_x_u8(__a, __p)
+#define vmvnq_x_u16(__a, __p) __arm_vmvnq_x_u16(__a, __p)
+#define vmvnq_x_u32(__a, __p) __arm_vmvnq_x_u32(__a, __p)
+#define vmvnq_x_n_s16(__imm, __p) __arm_vmvnq_x_n_s16(__imm, __p)
+#define vmvnq_x_n_s32(__imm, __p) __arm_vmvnq_x_n_s32(__imm, __p)
+#define vmvnq_x_n_u16(__imm, __p) __arm_vmvnq_x_n_u16(__imm, __p)
+#define vmvnq_x_n_u32(__imm, __p) __arm_vmvnq_x_n_u32(__imm, __p)
+#define vornq_x_s8(__a, __b, __p) __arm_vornq_x_s8(__a, __b, __p)
+#define vornq_x_s16(__a, __b, __p) __arm_vornq_x_s16(__a, __b, __p)
+#define vornq_x_s32(__a, __b, __p) __arm_vornq_x_s32(__a, __b, __p)
+#define vornq_x_u8(__a, __b, __p) __arm_vornq_x_u8(__a, __b, __p)
+#define vornq_x_u16(__a, __b, __p) __arm_vornq_x_u16(__a, __b, __p)
+#define vornq_x_u32(__a, __b, __p) __arm_vornq_x_u32(__a, __b, __p)
+#define vorrq_x_s8(__a, __b, __p) __arm_vorrq_x_s8(__a, __b, __p)
+#define vorrq_x_s16(__a, __b, __p) __arm_vorrq_x_s16(__a, __b, __p)
+#define vorrq_x_s32(__a, __b, __p) __arm_vorrq_x_s32(__a, __b, __p)
+#define vorrq_x_u8(__a, __b, __p) __arm_vorrq_x_u8(__a, __b, __p)
+#define vorrq_x_u16(__a, __b, __p) __arm_vorrq_x_u16(__a, __b, __p)
+#define vorrq_x_u32(__a, __b, __p) __arm_vorrq_x_u32(__a, __b, __p)
+#define vrev16q_x_s8(__a, __p) __arm_vrev16q_x_s8(__a, __p)
+#define vrev16q_x_u8(__a, __p) __arm_vrev16q_x_u8(__a, __p)
+#define vrev32q_x_s8(__a, __p) __arm_vrev32q_x_s8(__a, __p)
+#define vrev32q_x_s16(__a, __p) __arm_vrev32q_x_s16(__a, __p)
+#define vrev32q_x_u8(__a, __p) __arm_vrev32q_x_u8(__a, __p)
+#define vrev32q_x_u16(__a, __p) __arm_vrev32q_x_u16(__a, __p)
+#define vrev64q_x_s8(__a, __p) __arm_vrev64q_x_s8(__a, __p)
+#define vrev64q_x_s16(__a, __p) __arm_vrev64q_x_s16(__a, __p)
+#define vrev64q_x_s32(__a, __p) __arm_vrev64q_x_s32(__a, __p)
+#define vrev64q_x_u8(__a, __p) __arm_vrev64q_x_u8(__a, __p)
+#define vrev64q_x_u16(__a, __p) __arm_vrev64q_x_u16(__a, __p)
+#define vrev64q_x_u32(__a, __p) __arm_vrev64q_x_u32(__a, __p)
+#define vrshlq_x_s8(__a, __b, __p) __arm_vrshlq_x_s8(__a, __b, __p)
+#define vrshlq_x_s16(__a, __b, __p) __arm_vrshlq_x_s16(__a, __b, __p)
+#define vrshlq_x_s32(__a, __b, __p) __arm_vrshlq_x_s32(__a, __b, __p)
+#define vrshlq_x_u8(__a, __b, __p) __arm_vrshlq_x_u8(__a, __b, __p)
+#define vrshlq_x_u16(__a, __b, __p) __arm_vrshlq_x_u16(__a, __b, __p)
+#define vrshlq_x_u32(__a, __b, __p) __arm_vrshlq_x_u32(__a, __b, __p)
+#define vshllbq_x_n_s8(__a, __imm, __p) __arm_vshllbq_x_n_s8(__a, __imm, __p)
+#define vshllbq_x_n_s16(__a, __imm, __p) __arm_vshllbq_x_n_s16(__a, __imm, __p)
+#define vshllbq_x_n_u8(__a, __imm, __p) __arm_vshllbq_x_n_u8(__a, __imm, __p)
+#define vshllbq_x_n_u16(__a, __imm, __p) __arm_vshllbq_x_n_u16(__a, __imm, __p)
+#define vshlltq_x_n_s8(__a, __imm, __p) __arm_vshlltq_x_n_s8(__a, __imm, __p)
+#define vshlltq_x_n_s16(__a, __imm, __p) __arm_vshlltq_x_n_s16(__a, __imm, __p)
+#define vshlltq_x_n_u8(__a, __imm, __p) __arm_vshlltq_x_n_u8(__a, __imm, __p)
+#define vshlltq_x_n_u16(__a, __imm, __p) __arm_vshlltq_x_n_u16(__a, __imm, __p)
+#define vshlq_x_s8(__a, __b, __p) __arm_vshlq_x_s8(__a, __b, __p)
+#define vshlq_x_s16(__a, __b, __p) __arm_vshlq_x_s16(__a, __b, __p)
+#define vshlq_x_s32(__a, __b, __p) __arm_vshlq_x_s32(__a, __b, __p)
+#define vshlq_x_u8(__a, __b, __p) __arm_vshlq_x_u8(__a, __b, __p)
+#define vshlq_x_u16(__a, __b, __p) __arm_vshlq_x_u16(__a, __b, __p)
+#define vshlq_x_u32(__a, __b, __p) __arm_vshlq_x_u32(__a, __b, __p) /* "_x" (dont-care) predicated intrinsics: inactive lanes undefined.  */
+#define vshlq_x_n_s8(__a, __imm, __p) __arm_vshlq_x_n_s8(__a, __imm, __p)
+#define vshlq_x_n_s16(__a, __imm, __p) __arm_vshlq_x_n_s16(__a, __imm, __p)
+#define vshlq_x_n_s32(__a, __imm, __p) __arm_vshlq_x_n_s32(__a, __imm, __p)
+#define vshlq_x_n_u8(__a, __imm, __p) __arm_vshlq_x_n_u8(__a, __imm, __p)
+#define vshlq_x_n_u16(__a, __imm, __p) __arm_vshlq_x_n_u16(__a, __imm, __p)
+#define vshlq_x_n_u32(__a, __imm, __p) __arm_vshlq_x_n_u32(__a, __imm, __p)
+#define vrshrq_x_n_s8(__a, __imm, __p) __arm_vrshrq_x_n_s8(__a, __imm, __p)
+#define vrshrq_x_n_s16(__a, __imm, __p) __arm_vrshrq_x_n_s16(__a, __imm, __p)
+#define vrshrq_x_n_s32(__a, __imm, __p) __arm_vrshrq_x_n_s32(__a, __imm, __p)
+#define vrshrq_x_n_u8(__a, __imm, __p) __arm_vrshrq_x_n_u8(__a, __imm, __p)
+#define vrshrq_x_n_u16(__a, __imm, __p) __arm_vrshrq_x_n_u16(__a, __imm, __p)
+#define vrshrq_x_n_u32(__a, __imm, __p) __arm_vrshrq_x_n_u32(__a, __imm, __p)
+#define vshrq_x_n_s8(__a, __imm, __p) __arm_vshrq_x_n_s8(__a, __imm, __p)
+#define vshrq_x_n_s16(__a, __imm, __p) __arm_vshrq_x_n_s16(__a, __imm, __p)
+#define vshrq_x_n_s32(__a, __imm, __p) __arm_vshrq_x_n_s32(__a, __imm, __p)
+#define vshrq_x_n_u8(__a, __imm, __p) __arm_vshrq_x_n_u8(__a, __imm, __p)
+#define vshrq_x_n_u16(__a, __imm, __p) __arm_vshrq_x_n_u16(__a, __imm, __p)
+#define vshrq_x_n_u32(__a, __imm, __p) __arm_vshrq_x_n_u32(__a, __imm, __p)
+#define vdupq_x_n_f16(__a, __p) __arm_vdupq_x_n_f16(__a, __p) /* Floating-point "_x" variants start here.  */
+#define vdupq_x_n_f32(__a, __p) __arm_vdupq_x_n_f32(__a, __p)
+#define vminnmq_x_f16(__a, __b, __p) __arm_vminnmq_x_f16(__a, __b, __p)
+#define vminnmq_x_f32(__a, __b, __p) __arm_vminnmq_x_f32(__a, __b, __p)
+#define vmaxnmq_x_f16(__a, __b, __p) __arm_vmaxnmq_x_f16(__a, __b, __p)
+#define vmaxnmq_x_f32(__a, __b, __p) __arm_vmaxnmq_x_f32(__a, __b, __p)
+#define vabdq_x_f16(__a, __b, __p) __arm_vabdq_x_f16(__a, __b, __p)
+#define vabdq_x_f32(__a, __b, __p) __arm_vabdq_x_f32(__a, __b, __p)
+#define vabsq_x_f16(__a, __p) __arm_vabsq_x_f16(__a, __p)
+#define vabsq_x_f32(__a, __p) __arm_vabsq_x_f32(__a, __p)
+#define vaddq_x_f16(__a, __b, __p) __arm_vaddq_x_f16(__a, __b, __p)
+#define vaddq_x_f32(__a, __b, __p) __arm_vaddq_x_f32(__a, __b, __p)
+#define vaddq_x_n_f16(__a, __b, __p) __arm_vaddq_x_n_f16(__a, __b, __p)
+#define vaddq_x_n_f32(__a, __b, __p) __arm_vaddq_x_n_f32(__a, __b, __p)
+#define vnegq_x_f16(__a, __p) __arm_vnegq_x_f16(__a, __p)
+#define vnegq_x_f32(__a, __p) __arm_vnegq_x_f32(__a, __p)
+#define vmulq_x_f16(__a, __b, __p) __arm_vmulq_x_f16(__a, __b, __p)
+#define vmulq_x_f32(__a, __b, __p) __arm_vmulq_x_f32(__a, __b, __p)
+#define vmulq_x_n_f16(__a, __b, __p) __arm_vmulq_x_n_f16(__a, __b, __p)
+#define vmulq_x_n_f32(__a, __b, __p) __arm_vmulq_x_n_f32(__a, __b, __p)
+#define vsubq_x_f16(__a, __b, __p) __arm_vsubq_x_f16(__a, __b, __p)
+#define vsubq_x_f32(__a, __b, __p) __arm_vsubq_x_f32(__a, __b, __p)
+#define vsubq_x_n_f16(__a, __b, __p) __arm_vsubq_x_n_f16(__a, __b, __p)
+#define vsubq_x_n_f32(__a, __b, __p) __arm_vsubq_x_n_f32(__a, __b, __p)
+#define vcaddq_rot90_x_f16(__a, __b, __p) __arm_vcaddq_rot90_x_f16(__a, __b, __p)
+#define vcaddq_rot90_x_f32(__a, __b, __p) __arm_vcaddq_rot90_x_f32(__a, __b, __p)
+#define vcaddq_rot270_x_f16(__a, __b, __p) __arm_vcaddq_rot270_x_f16(__a, __b, __p)
+#define vcaddq_rot270_x_f32(__a, __b, __p) __arm_vcaddq_rot270_x_f32(__a, __b, __p)
+#define vcmulq_x_f16(__a, __b, __p) __arm_vcmulq_x_f16(__a, __b, __p)
+#define vcmulq_x_f32(__a, __b, __p) __arm_vcmulq_x_f32(__a, __b, __p)
+#define vcmulq_rot90_x_f16(__a, __b, __p) __arm_vcmulq_rot90_x_f16(__a, __b, __p)
+#define vcmulq_rot90_x_f32(__a, __b, __p) __arm_vcmulq_rot90_x_f32(__a, __b, __p)
+#define vcmulq_rot180_x_f16(__a, __b, __p) __arm_vcmulq_rot180_x_f16(__a, __b, __p)
+#define vcmulq_rot180_x_f32(__a, __b, __p) __arm_vcmulq_rot180_x_f32(__a, __b, __p)
+#define vcmulq_rot270_x_f16(__a, __b, __p) __arm_vcmulq_rot270_x_f16(__a, __b, __p)
+#define vcmulq_rot270_x_f32(__a, __b, __p) __arm_vcmulq_rot270_x_f32(__a, __b, __p)
+#define vcvtaq_x_s16_f16(__a, __p) __arm_vcvtaq_x_s16_f16(__a, __p)
+#define vcvtaq_x_s32_f32(__a, __p) __arm_vcvtaq_x_s32_f32(__a, __p)
+#define vcvtaq_x_u16_f16(__a, __p) __arm_vcvtaq_x_u16_f16(__a, __p)
+#define vcvtaq_x_u32_f32(__a, __p) __arm_vcvtaq_x_u32_f32(__a, __p)
+#define vcvtnq_x_s16_f16(__a, __p) __arm_vcvtnq_x_s16_f16(__a, __p)
+#define vcvtnq_x_s32_f32(__a, __p) __arm_vcvtnq_x_s32_f32(__a, __p)
+#define vcvtnq_x_u16_f16(__a, __p) __arm_vcvtnq_x_u16_f16(__a, __p)
+#define vcvtnq_x_u32_f32(__a, __p) __arm_vcvtnq_x_u32_f32(__a, __p)
+#define vcvtpq_x_s16_f16(__a, __p) __arm_vcvtpq_x_s16_f16(__a, __p)
+#define vcvtpq_x_s32_f32(__a, __p) __arm_vcvtpq_x_s32_f32(__a, __p)
+#define vcvtpq_x_u16_f16(__a, __p) __arm_vcvtpq_x_u16_f16(__a, __p)
+#define vcvtpq_x_u32_f32(__a, __p) __arm_vcvtpq_x_u32_f32(__a, __p)
+#define vcvtmq_x_s16_f16(__a, __p) __arm_vcvtmq_x_s16_f16(__a, __p)
+#define vcvtmq_x_s32_f32(__a, __p) __arm_vcvtmq_x_s32_f32(__a, __p)
+#define vcvtmq_x_u16_f16(__a, __p) __arm_vcvtmq_x_u16_f16(__a, __p)
+#define vcvtmq_x_u32_f32(__a, __p) __arm_vcvtmq_x_u32_f32(__a, __p)
+#define vcvtbq_x_f32_f16(__a, __p) __arm_vcvtbq_x_f32_f16(__a, __p)
+#define vcvttq_x_f32_f16(__a, __p) __arm_vcvttq_x_f32_f16(__a, __p)
+#define vcvtq_x_f16_u16(__a, __p) __arm_vcvtq_x_f16_u16(__a, __p)
+#define vcvtq_x_f16_s16(__a, __p) __arm_vcvtq_x_f16_s16(__a, __p)
+#define vcvtq_x_f32_s32(__a, __p) __arm_vcvtq_x_f32_s32(__a, __p)
+#define vcvtq_x_f32_u32(__a, __p) __arm_vcvtq_x_f32_u32(__a, __p)
+#define vcvtq_x_n_f16_s16(__a, __imm6, __p) __arm_vcvtq_x_n_f16_s16(__a, __imm6, __p)
+#define vcvtq_x_n_f16_u16(__a, __imm6, __p) __arm_vcvtq_x_n_f16_u16(__a, __imm6, __p)
+#define vcvtq_x_n_f32_s32(__a, __imm6, __p) __arm_vcvtq_x_n_f32_s32(__a, __imm6, __p)
+#define vcvtq_x_n_f32_u32(__a, __imm6, __p) __arm_vcvtq_x_n_f32_u32(__a, __imm6, __p)
+#define vcvtq_x_s16_f16(__a, __p) __arm_vcvtq_x_s16_f16(__a, __p)
+#define vcvtq_x_s32_f32(__a, __p) __arm_vcvtq_x_s32_f32(__a, __p)
+#define vcvtq_x_u16_f16(__a, __p) __arm_vcvtq_x_u16_f16(__a, __p)
+#define vcvtq_x_u32_f32(__a, __p) __arm_vcvtq_x_u32_f32(__a, __p)
+#define vcvtq_x_n_s16_f16(__a, __imm6, __p) __arm_vcvtq_x_n_s16_f16(__a, __imm6, __p)
+#define vcvtq_x_n_s32_f32(__a, __imm6, __p) __arm_vcvtq_x_n_s32_f32(__a, __imm6, __p)
+#define vcvtq_x_n_u16_f16(__a, __imm6, __p) __arm_vcvtq_x_n_u16_f16(__a, __imm6, __p)
+#define vcvtq_x_n_u32_f32(__a, __imm6, __p) __arm_vcvtq_x_n_u32_f32(__a, __imm6, __p)
+#define vrndq_x_f16(__a, __p) __arm_vrndq_x_f16(__a, __p)
+#define vrndq_x_f32(__a, __p) __arm_vrndq_x_f32(__a, __p)
+#define vrndnq_x_f16(__a, __p) __arm_vrndnq_x_f16(__a, __p)
+#define vrndnq_x_f32(__a, __p) __arm_vrndnq_x_f32(__a, __p)
+#define vrndmq_x_f16(__a, __p) __arm_vrndmq_x_f16(__a, __p)
+#define vrndmq_x_f32(__a, __p) __arm_vrndmq_x_f32(__a, __p)
+#define vrndpq_x_f16(__a, __p) __arm_vrndpq_x_f16(__a, __p)
+#define vrndpq_x_f32(__a, __p) __arm_vrndpq_x_f32(__a, __p)
+#define vrndaq_x_f16(__a, __p) __arm_vrndaq_x_f16(__a, __p)
+#define vrndaq_x_f32(__a, __p) __arm_vrndaq_x_f32(__a, __p)
+#define vrndxq_x_f16(__a, __p) __arm_vrndxq_x_f16(__a, __p)
+#define vrndxq_x_f32(__a, __p) __arm_vrndxq_x_f32(__a, __p)
+#define vandq_x_f16(__a, __b, __p) __arm_vandq_x_f16(__a, __b, __p)
+#define vandq_x_f32(__a, __b, __p) __arm_vandq_x_f32(__a, __b, __p)
+#define vbicq_x_f16(__a, __b, __p) __arm_vbicq_x_f16(__a, __b, __p)
+#define vbicq_x_f32(__a, __b, __p) __arm_vbicq_x_f32(__a, __b, __p)
+#define vbrsrq_x_n_f16(__a, __b, __p) __arm_vbrsrq_x_n_f16(__a, __b, __p)
+#define vbrsrq_x_n_f32(__a, __b, __p) __arm_vbrsrq_x_n_f32(__a, __b, __p)
+#define veorq_x_f16(__a, __b, __p) __arm_veorq_x_f16(__a, __b, __p)
+#define veorq_x_f32(__a, __b, __p) __arm_veorq_x_f32(__a, __b, __p)
+#define vornq_x_f16(__a, __b, __p) __arm_vornq_x_f16(__a, __b, __p)
+#define vornq_x_f32(__a, __b, __p) __arm_vornq_x_f32(__a, __b, __p)
+#define vorrq_x_f16(__a, __b, __p) __arm_vorrq_x_f16(__a, __b, __p)
+#define vorrq_x_f32(__a, __b, __p) __arm_vorrq_x_f32(__a, __b, __p)
+#define vrev32q_x_f16(__a, __p) __arm_vrev32q_x_f16(__a, __p)
+#define vrev64q_x_f16(__a, __p) __arm_vrev64q_x_f16(__a, __p)
+#define vrev64q_x_f32(__a, __p) __arm_vrev64q_x_f32(__a, __p)
#endif
__extension__ extern __inline void
@@ -13552,6 +13928,1995 @@ __arm_vstrwq_scatter_base_wb_u32 (uint32x4_t * __addr, const int __offset, uint3
__builtin_mve_vstrwq_scatter_base_wb_add_uv4si (*__addr, __offset, *__addr);
}
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_n_u8 (uint32_t __a, const int __imm, mve_pred16_t __p) /* Predicated decrementing dup; inactive lanes undefined (vuninitializedq inactive arg).  */
+{
+ return __builtin_mve_vddupq_m_n_uv16qi (vuninitializedq_u8 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_n_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vddupq_m_n_uv8hi (vuninitializedq_u16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_n_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vddupq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_wb_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p) /* "_wb": also writes the post-decrement value back through *__a.  */
+{
+ uint8x16_t __arg1 = vuninitializedq_u8 ();
+ uint8x16_t __res = __builtin_mve_vddupq_m_n_uv16qi (__arg1, * __a, __imm, __p);
+ *__a -= __imm * 16u; /* 16 lanes each decremented by __imm.  */
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_wb_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p)
+{
+ uint16x8_t __arg1 = vuninitializedq_u16 ();
+ uint16x8_t __res = __builtin_mve_vddupq_m_n_uv8hi (__arg1, *__a, __imm, __p);
+ *__a -= __imm * 8u; /* 8 lanes.  */
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vddupq_x_wb_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p)
+{
+ uint32x4_t __arg1 = vuninitializedq_u32 ();
+ uint32x4_t __res = __builtin_mve_vddupq_m_n_uv4si (__arg1, *__a, __imm, __p);
+ *__a -= __imm * 4u; /* 4 lanes.  */
+ return __res;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_n_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) /* Predicated decrementing-wrapping dup ("_x": inactive lanes undefined).  */
+{
+ return __builtin_mve_vdwdupq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_n_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vdwdupq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_n_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vdwdupq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_wb_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint8x16_t __arg1 = vuninitializedq_u8 ();
+ uint8x16_t __res = __builtin_mve_vdwdupq_m_n_uv16qi (__arg1, *__a, __b, __imm, __p);
+ *__a = __builtin_mve_vdwdupq_m_wb_uv16qi (__arg1, *__a, __b, __imm, __p); /* "_wb" builtin yields the written-back (wrapped) offset.  */
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_wb_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint16x8_t __arg1 = vuninitializedq_u16 ();
+ uint16x8_t __res = __builtin_mve_vdwdupq_m_n_uv8hi (__arg1, *__a, __b, __imm, __p);
+ *__a = __builtin_mve_vdwdupq_m_wb_uv8hi (__arg1, *__a, __b, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdwdupq_x_wb_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint32x4_t __arg1 = vuninitializedq_u32 ();
+ uint32x4_t __res = __builtin_mve_vdwdupq_m_n_uv4si (__arg1, *__a, __b, __imm, __p);
+ *__a = __builtin_mve_vdwdupq_m_wb_uv4si (__arg1, *__a, __b, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_n_u8 (uint32_t __a, const int __imm, mve_pred16_t __p) /* Predicated incrementing dup; inactive lanes undefined.  */
+{
+ return __builtin_mve_vidupq_m_n_uv16qi (vuninitializedq_u8 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_n_u16 (uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vidupq_m_n_uv8hi (vuninitializedq_u16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_n_u32 (uint32_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vidupq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_wb_u8 (uint32_t *__a, const int __imm, mve_pred16_t __p) /* "_wb": writes the post-increment value back through *__a.  */
+{
+ uint8x16_t __arg1 = vuninitializedq_u8 ();
+ uint8x16_t __res = __builtin_mve_vidupq_m_n_uv16qi (__arg1, *__a, __imm, __p);
+ *__a += __imm * 16u; /* 16 lanes each incremented by __imm.  */
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_wb_u16 (uint32_t *__a, const int __imm, mve_pred16_t __p)
+{
+ uint16x8_t __arg1 = vuninitializedq_u16 ();
+ uint16x8_t __res = __builtin_mve_vidupq_m_n_uv8hi (__arg1, *__a, __imm, __p);
+ *__a += __imm * 8u; /* 8 lanes.  */
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vidupq_x_wb_u32 (uint32_t *__a, const int __imm, mve_pred16_t __p)
+{
+ uint32x4_t __arg1 = vuninitializedq_u32 ();
+ uint32x4_t __res = __builtin_mve_vidupq_m_n_uv4si (__arg1, *__a, __imm, __p);
+ *__a += __imm * 4u; /* 4 lanes.  */
+ return __res;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_n_u8 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p) /* Predicated incrementing-wrapping dup ("_x": inactive lanes undefined).  */
+{
+ return __builtin_mve_viwdupq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_n_u16 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_viwdupq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_n_u32 (uint32_t __a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_viwdupq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_wb_u8 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint8x16_t __arg1 = vuninitializedq_u8 ();
+ uint8x16_t __res = __builtin_mve_viwdupq_m_n_uv16qi (__arg1, *__a, __b, __imm, __p);
+ *__a = __builtin_mve_viwdupq_m_wb_uv16qi (__arg1, *__a, __b, __imm, __p); /* "_wb" builtin yields the written-back (wrapped) offset.  */
+ return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_wb_u16 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint16x8_t __arg1 = vuninitializedq_u16 ();
+ uint16x8_t __res = __builtin_mve_viwdupq_m_n_uv8hi (__arg1, *__a, __b, __imm, __p);
+ *__a = __builtin_mve_viwdupq_m_wb_uv8hi (__arg1, *__a, __b, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_viwdupq_x_wb_u32 (uint32_t *__a, uint32_t __b, const int __imm, mve_pred16_t __p)
+{
+ uint32x4_t __arg1 = vuninitializedq_u32 ();
+ uint32x4_t __res = __builtin_mve_viwdupq_m_n_uv4si (__arg1, *__a, __b, __imm, __p);
+ *__a = __builtin_mve_viwdupq_m_wb_uv4si (__arg1, *__a, __b, __imm, __p);
+ return __res;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_x_n_s8 (int8_t __a, mve_pred16_t __p) /* Predicated scalar broadcast; inactive lanes undefined.  */
+{
+ return __builtin_mve_vdupq_m_n_sv16qi (vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_x_n_s16 (int16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_sv8hi (vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_x_n_s32 (int32_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_sv4si (vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_x_n_u8 (uint8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_uv16qi (vuninitializedq_u8 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_x_n_u16 (uint16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_uv8hi (vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_x_n_u32 (uint32_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_uv4si (vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) /* Predicated lane-wise minimum; inactive lanes undefined.  */
+{
+ return __builtin_mve_vminq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) /* Predicated lane-wise maximum; inactive lanes undefined.  */
+{
+ return __builtin_mve_vmaxq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) /* Predicated absolute difference; inactive lanes undefined.  */
+{
+ return __builtin_mve_vabdq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_x_s8 (int8x16_t __a, mve_pred16_t __p) /* Predicated absolute value (signed only); inactive lanes undefined.  */
+{
+ return __builtin_mve_vabsq_m_sv16qi (vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vabsq_m_sv8hi (vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_x_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vabsq_m_sv4si (vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) /* Predicated vector add; inactive lanes undefined.  */
+{
+ return __builtin_mve_vaddq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p) /* "_n": second operand is a scalar.  */
+{
+ return __builtin_mve_vaddq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_x_s8 (int8x16_t __a, mve_pred16_t __p) /* Predicated count leading sign bits; inactive lanes undefined.  */
+{
+ return __builtin_mve_vclsq_m_sv16qi (vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclsq_m_sv8hi (vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclsq_x_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclsq_m_sv4si (vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x_s8 (int8x16_t __a, mve_pred16_t __p) /* Predicated count leading zeros.  */
+{
+ return __builtin_mve_vclzq_m_sv16qi (vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_sv8hi (vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_sv4si (vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_uv16qi (vuninitializedq_u8 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_uv8hi (vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vclzq_x_u32 (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vclzq_m_uv4si (vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_x_s8 (int8x16_t __a, mve_pred16_t __p) /* Predicated negate (signed only).  */
+{
+ return __builtin_mve_vnegq_m_sv16qi (vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vnegq_m_sv8hi (vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_x_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vnegq_m_sv4si (vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p) /* Predicated multiply returning high half; inactive lanes undefined.  */
+{
+ return __builtin_mve_vmulhq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulhq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulhq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_poly_x_p8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_poly_m_pv16qi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_poly_x_p16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_poly_m_pv8hi (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_sv16qi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_sv8hi (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_sv4si (vuninitializedq_s64 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_uv16qi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_uv8hi (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmullbq_int_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmullbq_int_m_uv4si (vuninitializedq_u64 (), __a, __b, __p);
+}
+
+/* "_x" (dont-care) vmulltq_poly variants for p8/p16 operands, producing a
+   double-width unsigned result.  False-predicated lanes are undefined;
+   implemented via the _m builtin with a vuninitializedq inactive operand.  */
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_poly_x_p8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_poly_m_pv16qi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_poly_x_p16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_poly_m_pv8hi (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* "_x" (dont-care) vmulltq_int variants: widening multiply (result elements
+   are twice the input element width).  False-predicated lanes are undefined;
+   each wrapper calls the _m builtin with a vuninitializedq inactive operand
+   of the (double-width) result type.  */
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_sv16qi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_sv8hi (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_sv4si (vuninitializedq_s64 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_uv16qi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_uv8hi (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulltq_int_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulltq_int_m_uv4si (vuninitializedq_u64 (), __a, __b, __p);
+}
+
+/* "_x" (dont-care) vmulq variants (vector-by-vector and "_n"
+   vector-by-scalar forms): false-predicated lanes take undefined values;
+   each wrapper forwards to the corresponding _m builtin with a
+   vuninitializedq inactive operand.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+/* "_n" forms: __b is a scalar of the element type.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* "_x" (dont-care) vsubq variants (vector-by-vector and "_n"
+   vector-by-scalar forms): false-predicated lanes take undefined values;
+   each wrapper forwards to the corresponding _m builtin with a
+   vuninitializedq inactive operand.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+/* "_n" forms: __b is a scalar of the element type.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* "_x" (dont-care) vcaddq_rot90 variants: false-predicated lanes take
+   undefined values; each wrapper forwards to the _m builtin with a
+   vuninitializedq inactive operand.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* "_x" (dont-care) vcaddq_rot270 variants: false-predicated lanes take
+   undefined values; each wrapper forwards to the _m builtin with a
+   vuninitializedq inactive operand.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* "_x" (dont-care) vhaddq variants ("_n" vector-by-scalar and
+   vector-by-vector forms): false-predicated lanes take undefined values;
+   each wrapper forwards to the corresponding _m builtin with a
+   vuninitializedq inactive operand.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhaddq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* "_x" (dont-care) vhcaddq_rot90/rot270 variants (signed element types
+   only, per the ACLE): false-predicated lanes take undefined values; each
+   wrapper forwards to the _m builtin with a vuninitializedq inactive
+   operand.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot90_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot90_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot90_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot90_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot270_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot270_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhcaddq_rot270_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhcaddq_rot270_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+/* "_x" (dont-care) vhsubq variants ("_n" vector-by-scalar and
+   vector-by-vector forms): false-predicated lanes take undefined values;
+   each wrapper forwards to the corresponding _m builtin with a
+   vuninitializedq inactive operand.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_n_s8 (int8x16_t __a, int8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_n_s16 (int16x8_t __a, int16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_n_u8 (uint8x16_t __a, uint8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_n_u16 (uint16x8_t __a, uint16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_n_u32 (uint32x4_t __a, uint32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vhsubq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vhsubq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* "_x" (dont-care) vrhaddq variants: false-predicated lanes take undefined
+   values; each wrapper forwards to the _m builtin with a vuninitializedq
+   inactive operand.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrhaddq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrhaddq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* "_x" (dont-care) vrmulhq variants: false-predicated lanes take undefined
+   values; each wrapper forwards to the _m builtin with a vuninitializedq
+   inactive operand.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrmulhq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrmulhq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* "_x" (dont-care) vandq (bitwise AND) variants: false-predicated lanes
+   take undefined values; each wrapper forwards to the _m builtin with a
+   vuninitializedq inactive operand.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* "_x" (dont-care) vbicq (bit clear) variants: false-predicated lanes take
+   undefined values; each wrapper forwards to the _m builtin with a
+   vuninitializedq inactive operand.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* "_x" (dont-care) vbrsrq_n variants.  Note __b is always a 32-bit scalar
+   regardless of the element type.  False-predicated lanes take undefined
+   values; each wrapper forwards to the _m builtin with a vuninitializedq
+   inactive operand.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x_n_s8 (int8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x_n_s16 (int16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x_n_s32 (int32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x_n_u8 (uint8x16_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x_n_u16 (uint16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x_n_u32 (uint32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* "_x" (dont-care) veorq (bitwise XOR) variants: false-predicated lanes
+   take undefined values; each wrapper forwards to the _m builtin with a
+   vuninitializedq inactive operand.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* "_x" (dont-care) vmovlbq variants: unary widening move (the result
+   element width is twice the input's, per the signatures).  False-predicated
+   lanes take undefined values; each wrapper forwards to the _m builtin with
+   a vuninitializedq inactive operand of the widened type.  */
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_x_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovlbq_m_sv16qi (vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovlbq_m_sv8hi (vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovlbq_m_uv16qi (vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovlbq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovlbq_m_uv8hi (vuninitializedq_u32 (), __a, __p);
+}
+
+/* "_x" (dont-care) vmovltq variants: unary widening move (the result
+   element width is twice the input's, per the signatures).  False-predicated
+   lanes take undefined values; each wrapper forwards to the _m builtin with
+   a vuninitializedq inactive operand of the widened type.  */
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_x_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovltq_m_sv16qi (vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovltq_m_sv8hi (vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovltq_m_uv16qi (vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmovltq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmovltq_m_uv8hi (vuninitializedq_u32 (), __a, __p);
+}
+
+/* "_x" (dont-care) vmvnq (bitwise NOT) unary variants: false-predicated
+   lanes take undefined values; each wrapper forwards to the _m builtin with
+   a vuninitializedq inactive operand.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_sv16qi (vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_sv8hi (vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_sv4si (vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_u8 (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_uv16qi (vuninitializedq_u8 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_u16 (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_uv8hi (vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_u32 (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_uv4si (vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_n_s16 (const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_n_sv8hi (vuninitializedq_s16 (), __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_n_s32 (const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_n_sv4si (vuninitializedq_s32 (), __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_n_u16 (const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_n_uv8hi (vuninitializedq_u16 (), __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmvnq_x_n_u32 (const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vmvnq_m_n_uv4si (vuninitializedq_u32 (), __imm, __p);
+}
+
+/* Predicated bitwise OR-NOT (vorn: __a | ~__b), _x variants —
+   false-predicated lanes are undefined.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* Predicated bitwise OR (vorr), _x variants.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x_u8 (uint8x16_t __a, uint8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x_u16 (uint16x8_t __a, uint16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x_u32 (uint32x4_t __a, uint32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* Predicated element reversal within 16-bit containers (vrev16), _x
+   variants — false-predicated lanes are undefined.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev16q_x_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev16q_m_sv16qi (vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev16q_x_u8 (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev16q_m_uv16qi (vuninitializedq_u8 (), __a, __p);
+}
+
+/* Predicated element reversal within 32-bit containers (vrev32).  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_x_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev32q_m_sv16qi (vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev32q_m_sv8hi (vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_x_u8 (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev32q_m_uv16qi (vuninitializedq_u8 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_x_u16 (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev32q_m_uv8hi (vuninitializedq_u16 (), __a, __p);
+}
+
+/* Predicated element reversal within 64-bit containers (vrev64).  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x_s8 (int8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_sv16qi (vuninitializedq_s8 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_sv8hi (vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_sv4si (vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x_u8 (uint8x16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_uv16qi (vuninitializedq_u8 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x_u16 (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_uv8hi (vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x_u32 (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_uv4si (vuninitializedq_u32 (), __a, __p);
+}
+
+/* Predicated rounding shift left by per-lane signed shift counts (vrshl),
+   _x variants.  Note the unsigned overloads still take a SIGNED shift
+   vector __b (negative counts shift right), per the ACLE signature.
+   False-predicated lanes are undefined.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x_u8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x_u16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshlq_x_u32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshlq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* Predicated widening shift-left-long by immediate, _x variants: vshllb
+   widens and shifts the bottom (even) half-width lanes, producing a
+   double-width result.  False-predicated lanes are undefined.  */
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshllbq_m_n_sv16qi (vuninitializedq_s16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshllbq_m_n_sv8hi (vuninitializedq_s32 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshllbq_m_n_uv16qi (vuninitializedq_u16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshllbq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshllbq_m_n_uv8hi (vuninitializedq_u32 (), __a, __imm, __p);
+}
+
+/* As above, but vshllt operates on the top (odd) half-width lanes.  */
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlltq_m_n_sv16qi (vuninitializedq_s16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlltq_m_n_sv8hi (vuninitializedq_s32 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlltq_m_n_uv16qi (vuninitializedq_u16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlltq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlltq_m_n_uv8hi (vuninitializedq_u32 (), __a, __imm, __p);
+}
+
+/* Predicated shift left by per-lane signed shift counts (vshl), _x
+   variants; as with vrshl, unsigned overloads take a signed __b.
+   False-predicated lanes are undefined.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_s8 (int8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_sv16qi (vuninitializedq_s8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_s16 (int16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_sv8hi (vuninitializedq_s16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_s32 (int32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_sv4si (vuninitializedq_s32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_u8 (uint8x16_t __a, int8x16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_uv16qi (vuninitializedq_u8 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_u16 (uint16x8_t __a, int16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_uv8hi (vuninitializedq_u16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_u32 (uint32x4_t __a, int32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_uv4si (vuninitializedq_u32 (), __a, __b, __p);
+}
+
+/* Predicated shift left by compile-time immediate, _x variants.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_sv16qi (vuninitializedq_s8 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_sv8hi (vuninitializedq_s16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_sv4si (vuninitializedq_s32 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_uv16qi (vuninitializedq_u8 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_uv8hi (vuninitializedq_u16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshlq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p);
+}
+
+/* Predicated rounding shift right by immediate (vrshr), _x variants —
+   false-predicated lanes are undefined.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_sv16qi (vuninitializedq_s8 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_sv8hi (vuninitializedq_s16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_sv4si (vuninitializedq_s32 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_uv16qi (vuninitializedq_u8 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_uv8hi (vuninitializedq_u16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrshrq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vrshrq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p);
+}
+
+/* Predicated (truncating) shift right by immediate (vshr), _x variants.  */
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x_n_s8 (int8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_sv16qi (vuninitializedq_s8 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x_n_s16 (int16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_sv8hi (vuninitializedq_s16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x_n_s32 (int32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_sv4si (vuninitializedq_s32 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x_n_u8 (uint8x16_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_uv16qi (vuninitializedq_u8 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x_n_u16 (uint16x8_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_uv8hi (vuninitializedq_u16 (), __a, __imm, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshrq_x_n_u32 (uint32x4_t __a, const int __imm, mve_pred16_t __p)
+{
+ return __builtin_mve_vshrq_m_n_uv4si (vuninitializedq_u32 (), __a, __imm, __p);
+}
+
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
__extension__ extern __inline void
@@ -16224,6 +18589,685 @@ __arm_vstrwq_scatter_base_wb_p_f32 (uint32x4_t * __addr, const int __offset, flo
__builtin_mve_vstrwq_scatter_base_wb_p_add_fv4sf (*__addr, __offset, *__addr, __p);
}
+/* Floating-point _x (don't-care) predicated intrinsics: duplicate-scalar
+   (vdup) and pairwise min/max (vminnm/vmaxnm).  False-predicated lanes
+   are undefined (merging _m builtin with vuninitializedq inactive).  */
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_x_n_f16 (float16_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_fv8hf (vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vdupq_x_n_f32 (float32_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vdupq_m_n_fv4sf (vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminnmq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vminnmq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vminnmq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxnmq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmaxnmq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmaxnmq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+/* Predicated FP absolute difference (vabd), absolute value (vabs) and
+   addition (vadd; _n forms take a scalar second operand), _x variants —
+   false-predicated lanes are undefined.  */
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabdq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vabdq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vabsq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vabsq_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vabsq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vaddq_m_n_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+/* Predicated FP negation (vneg), multiplication (vmul) and subtraction
+   (vsub), _x variants; _n forms take a scalar second operand.
+   False-predicated lanes are undefined.  */
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vnegq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vnegq_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vnegq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vmulq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vmulq_m_n_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_n_f16 (float16x8_t __a, float16_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vsubq_x_n_f32 (float32x4_t __a, float32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vsubq_m_n_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+/* Predicated complex add with rotation (vcadd) and complex multiply with
+   rotation (vcmul), _x variants; the rotation suffix is the angle in
+   degrees applied to the second operand (see the Arm MVE ACLE spec).
+   False-predicated lanes are undefined.  */
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot90_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot90_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcaddq_rot270_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcaddq_rot270_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot90_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot90_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot90_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot90_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot180_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot180_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot180_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot180_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot270_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot270_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcmulq_rot270_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vcmulq_rot270_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+/* Predicated float-to-integer conversions with an explicit rounding mode
+   in the mnemonic (per the Arm MVE ACLE): vcvta = round to nearest, ties
+   away; vcvtn = to nearest, ties even; vcvtp = toward +Inf; vcvtm =
+   toward -Inf.  _x variants: false-predicated lanes are undefined.  */
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtaq_m_sv8hi (vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtaq_m_sv4si (vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtaq_m_uv8hi (vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtaq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtaq_m_uv4si (vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtnq_m_sv8hi (vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtnq_m_sv4si (vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtnq_m_uv8hi (vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtnq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtnq_m_uv4si (vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtpq_m_sv8hi (vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtpq_m_sv4si (vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtpq_m_uv8hi (vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtpq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtpq_m_uv4si (vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtmq_m_sv8hi (vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtmq_m_sv4si (vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtmq_m_uv8hi (vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtmq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtmq_m_uv4si (vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtbq_x_f32_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtbq_m_f32_f16v4sf (vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvttq_x_f32_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvttq_m_f32_f16v4sf (vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_f16_u16 (uint16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_to_f_uv8hf (vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_f16_s16 (int16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_to_f_sv8hf (vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_f32_s32 (int32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_to_f_sv4sf (vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_f32_u32 (uint32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_to_f_uv4sf (vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n_f16_s16 (int16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_to_f_sv8hf (vuninitializedq_f16 (), __a, __imm6, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n_f16_u16 (uint16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_to_f_uv8hf (vuninitializedq_f16 (), __a, __imm6, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n_f32_s32 (int32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_to_f_sv4sf (vuninitializedq_f32 (), __a, __imm6, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n_f32_u32 (uint32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_to_f_uv4sf (vuninitializedq_f32 (), __a, __imm6, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_s16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_from_f_sv8hi (vuninitializedq_s16 (), __a, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_s32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_from_f_sv4si (vuninitializedq_s32 (), __a, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_u16_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_from_f_uv8hi (vuninitializedq_u16 (), __a, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_u32_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_from_f_uv4si (vuninitializedq_u32 (), __a, __p);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n_s16_f16 (float16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_from_f_sv8hi (vuninitializedq_s16 (), __a, __imm6, __p);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n_s32_f32 (float32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_from_f_sv4si (vuninitializedq_s32 (), __a, __imm6, __p);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n_u16_f16 (float16x8_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_from_f_uv8hi (vuninitializedq_u16 (), __a, __imm6, __p);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vcvtq_x_n_u32_f32 (float32x4_t __a, const int __imm6, mve_pred16_t __p)
+{
+ return __builtin_mve_vcvtq_m_n_from_f_uv4si (vuninitializedq_u32 (), __a, __imm6, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndq_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndq_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndnq_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndnq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndnq_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndnq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndmq_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndmq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndmq_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndmq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndpq_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndpq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndpq_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndpq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndaq_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndaq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndaq_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndaq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndxq_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndxq_m_fv8hf (vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrndxq_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrndxq_m_fv4sf (vuninitializedq_f32 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vandq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vandq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbicq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbicq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x_n_f16 (float16x8_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vbrsrq_x_n_f32 (float32x4_t __a, int32_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vbrsrq_m_n_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_veorq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_veorq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vornq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vornq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x_f16 (float16x8_t __a, float16x8_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_fv8hf (vuninitializedq_f16 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vorrq_x_f32 (float32x4_t __a, float32x4_t __b, mve_pred16_t __p)
+{
+ return __builtin_mve_vorrq_m_fv4sf (vuninitializedq_f32 (), __a, __b, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev32q_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev32q_m_fv8hf (vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x_f16 (float16x8_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_fv8hf (vuninitializedq_f16 (), __a, __p);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vrev64q_x_f32 (float32x4_t __a, mve_pred16_t __p)
+{
+ return __builtin_mve_vrev64q_m_fv4sf (vuninitializedq_f32 (), __a, __p);
+}
+
#endif
enum {
@@ -18069,30 +21113,16 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vdupq_m_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16_t), p2), \
int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vdupq_m_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32_t), p2));})
-#define vfmaq_n(p0,p1,p2) __arm_vfmaq_n(p0,p1,p2)
-#define __arm_vfmaq_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
- __typeof(p1) __p1 = (p1); \
- __typeof(p2) __p2 = (p2); \
- _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vfmaq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t)), \
- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vfmaq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t)));})
-
#define vfmaq(p0,p1,p2) __arm_vfmaq(p0,p1,p2)
#define __arm_vfmaq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
__typeof(p1) __p1 = (p1); \
__typeof(p2) __p2 = (p2); \
_Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vfmaq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vfmaq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t)), \
int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmaq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmaq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
-#define vfmasq_n(p0,p1,p2) __arm_vfmasq_n(p0,p1,p2)
-#define __arm_vfmasq_n(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
- __typeof(p1) __p1 = (p1); \
- __typeof(p2) __p2 = (p2); \
- _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
- int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vfmasq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t)), \
- int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vfmasq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t)));})
-
#define vfmsq(p0,p1,p2) __arm_vfmsq(p0,p1,p2)
#define __arm_vfmsq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
__typeof(p1) __p1 = (p1); \
@@ -18101,6 +21131,14 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vfmsq_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t)), \
int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vfmsq_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t)));})
+#define vfmasq(p0,p1,p2) __arm_vfmasq(p0,p1,p2)
+#define __arm_vfmasq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vfmasq_n_f16 (__ARM_mve_coerce(__p0, float16x8_t), __ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vfmasq_n_f32 (__ARM_mve_coerce(__p0, float32x4_t), __ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t)));})
+
#define vmaxnmaq_m(p0,p1,p2) __arm_vmaxnmaq_m(p0,p1,p2)
#define __arm_vmaxnmaq_m(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
__typeof(p1) __p1 = (p1); \
@@ -19154,6 +22192,306 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_uint32x4_t]: __arm_vstrwq_scatter_base_wb_p_u32 (p0, p1, __ARM_mve_coerce(__p2, uint32x4_t), p3), \
int (*)[__ARM_mve_type_float32x4_t]: __arm_vstrwq_scatter_base_wb_p_f32 (p0, p1, __ARM_mve_coerce(__p2, float32x4_t), p3));})
+#define vabdq_x(p1,p2,p3) __arm_vabdq_x(p1,p2,p3)
+#define __arm_vabdq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vabdq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vabdq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define vabsq_x(p1,p2) __arm_vabsq_x(p1,p2)
+#define __arm_vabsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vabsq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vabsq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define vaddq_x(p1,p2,p3) __arm_vaddq_x(p1,p2,p3)
+#define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vaddq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vaddq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vaddq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vaddq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})
+
+#define vandq_x(p1,p2,p3) __arm_vandq_x(p1,p2,p3)
+#define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vandq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vandq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define vbicq_x(p1,p2,p3) __arm_vbicq_x(p1,p2,p3)
+#define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vbicq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vbicq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define vbrsrq_x(p1,p2,p3) __arm_vbrsrq_x(p1,p2,p3)
+#define __arm_vbrsrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vbrsrq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vbrsrq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2, p3));})
+
+#define vcaddq_rot270_x(p1,p2,p3) __arm_vcaddq_rot270_x(p1,p2,p3)
+#define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot270_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot270_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define vcaddq_rot90_x(p1,p2,p3) __arm_vcaddq_rot90_x(p1,p2,p3)
+#define __arm_vcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcaddq_rot90_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcaddq_rot90_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define vcmulq_rot180_x(p1,p2,p3) __arm_vcmulq_rot180_x(p1,p2,p3)
+#define __arm_vcmulq_rot180_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot180_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot180_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define vcmulq_rot270_x(p1,p2,p3) __arm_vcmulq_rot270_x(p1,p2,p3)
+#define __arm_vcmulq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot270_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot270_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define vcmulq_x(p1,p2,p3) __arm_vcmulq_x(p1,p2,p3)
+#define __arm_vcmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define vcvtq_x(p1,p2) __arm_vcvtq_x(p1,p2)
+#define __arm_vcvtq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_x_f16_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_x_f32_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+#define vcvtq_x_n(p1,p2,p3) __arm_vcvtq_x_n(p1,p2,p3)
+#define __arm_vcvtq_x_n(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vcvtq_x_n_f16_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vcvtq_x_n_f32_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vcvtq_x_n_f16_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vcvtq_x_n_f32_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+#define veorq_x(p1,p2,p3) __arm_veorq_x(p1,p2,p3)
+#define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_veorq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_veorq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define vmaxnmq_x(p1,p2,p3) __arm_vmaxnmq_x(p1,p2,p3)
+#define __arm_vmaxnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmaxnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmaxnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define vminnmq_x(p1,p2,p3) __arm_vminnmq_x(p1,p2,p3)
+#define __arm_vminnmq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vminnmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vminnmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define vmulq_x(p1,p2,p3) __arm_vmulq_x(p1,p2,p3)
+#define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vmulq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vmulq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vmulq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vmulq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})
+
+#define vnegq_x(p1,p2) __arm_vnegq_x(p1,p2)
+#define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vnegq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vnegq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define vornq_x(p1,p2,p3) __arm_vornq_x(p1,p2,p3)
+#define __arm_vornq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vornq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vornq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define vorrq_x(p1,p2,p3) __arm_vorrq_x(p1,p2,p3)
+#define __arm_vorrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vorrq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vorrq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
+#define vrev32q_x(p1,p2) __arm_vrev32q_x(p1,p2)
+#define __arm_vrev32q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev32q_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2));})
+
+#define vrev64q_x(p1,p2) __arm_vrev64q_x(p1,p2)
+#define __arm_vrev64q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2), \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrev64q_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrev64q_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define vrndaq_x(p1,p2) __arm_vrndaq_x(p1,p2)
+#define __arm_vrndaq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndaq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndaq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define vrndmq_x(p1,p2) __arm_vrndmq_x(p1,p2)
+#define __arm_vrndmq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndmq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndmq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define vrndnq_x(p1,p2) __arm_vrndnq_x(p1,p2)
+#define __arm_vrndnq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndnq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndnq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define vrndpq_x(p1,p2) __arm_vrndpq_x(p1,p2)
+#define __arm_vrndpq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndpq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndpq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define vrndq_x(p1,p2) __arm_vrndq_x(p1,p2)
+#define __arm_vrndq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define vrndxq_x(p1,p2) __arm_vrndxq_x(p1,p2)
+#define __arm_vrndxq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_float16x8_t]: __arm_vrndxq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), p2), \
+ int (*)[__ARM_mve_type_float32x4_t]: __arm_vrndxq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), p2));})
+
+#define vsubq_x(p1,p2,p3) __arm_vsubq_x(p1,p2,p3)
+#define __arm_vsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vsubq_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vsubq_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16_t]: __arm_vsubq_x_n_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32_t]: __arm_vsubq_x_n_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32_t), p3));})
+
+#define vcmulq_rot90_x(p1,p2,p3) __arm_vcmulq_rot90_x(p1,p2,p3)
+#define __arm_vcmulq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]: __arm_vcmulq_rot90_x_f16 (__ARM_mve_coerce(__p1, float16x8_t), __ARM_mve_coerce(__p2, float16x8_t), p3), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]: __arm_vcmulq_rot90_x_f32 (__ARM_mve_coerce(__p1, float32x4_t), __ARM_mve_coerce(__p2, float32x4_t), p3));})
+
#else /* MVE Integer. */
#define vstrwq_scatter_base_wb(p0,p1,p2) __arm_vstrwq_scatter_base_wb(p0,p1,p2)
@@ -21648,8 +24986,509 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_uint32x4_t]: __arm_vreinterpretq_u8_u32 (__ARM_mve_coerce(__p0, uint32x4_t)), \
int (*)[__ARM_mve_type_uint64x2_t]: __arm_vreinterpretq_u8_u64 (__ARM_mve_coerce(__p0, uint64x2_t)));})
+/* Integer-branch "_x" polymorphic wrappers.  vabsq_x dispatches on signed
+   vector types only, matching the overloads it maps to here.  */
+#define vabsq_x(p1,p2) __arm_vabsq_x(p1,p2)
+#define __arm_vabsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vabsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vabsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vabsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+#define vaddq_x(p1,p2,p3) __arm_vaddq_x(p1,p2,p3)
+/* NOTE(review): the (vector, scalar) _n cases match only an exact
+   int8_t/int16_t/... typeid for p2; an untyped literal such as 1 has type
+   int and would select no case — confirm intended usage (later header
+   revisions widened scalar matching with extra coerce macros).  */
+#define __arm_vaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vaddq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vaddq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
+
+#define vcaddq_rot270_x(p1,p2,p3) __arm_vcaddq_rot270_x(p1,p2,p3)
+/* Complex add, 270-degree rotation; vector-vector for all six int types.  */
+#define __arm_vcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot270_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot270_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot270_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+#define vcaddq_rot90_x(p1,p2,p3) __arm_vcaddq_rot90_x(p1,p2,p3)
+/* Complex add, 90-degree rotation; same dispatch table shape as rot270.  */
+#define __arm_vcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vcaddq_rot90_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vcaddq_rot90_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vcaddq_rot90_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+/* Bitwise XOR, dont-care predication; vector-vector, all six int types.  */
+#define veorq_x(p1,p2,p3) __arm_veorq_x(p1,p2,p3)
+#define __arm_veorq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_veorq_x_s8(__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_veorq_x_s16(__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_veorq_x_s32(__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_veorq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_veorq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_veorq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+/* Element-wise maximum; vector-vector, all six int types.  */
+#define vmaxq_x(p1,p2,p3) __arm_vmaxq_x(p1,p2,p3)
+#define __arm_vmaxq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmaxq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmaxq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmaxq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmaxq_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmaxq_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmaxq_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+/* Element-wise minimum; vector-vector, all six int types.  */
+#define vminq_x(p1,p2,p3) __arm_vminq_x(p1,p2,p3)
+#define __arm_vminq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vminq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vminq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vminq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vminq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vminq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vminq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+/* Widening moves: only the narrow (8/16-bit) element types appear, since
+   the result doubles the element width.  */
+#define vmovlbq_x(p1,p2) __arm_vmovlbq_x(p1,p2)
+#define __arm_vmovlbq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovlbq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovlbq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovlbq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovlbq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
+#define vmovltq_x(p1,p2) __arm_vmovltq_x(p1,p2)
+#define __arm_vmovltq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmovltq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vmovltq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmovltq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmovltq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
+/* High-half multiply; vector-vector, all six int types.  */
+#define vmulhq_x(p1,p2,p3) __arm_vmulhq_x(p1,p2,p3)
+#define __arm_vmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+/* Widening integer multiply (bottom halves); all six int types.  */
+#define vmullbq_int_x(p1,p2,p3) __arm_vmullbq_int_x(p1,p2,p3)
+#define __arm_vmullbq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmullbq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmullbq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmullbq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_int_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmullbq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+/* Polynomial widening multiply (bottom halves): the _p8/_p16 variants take
+   uint8x16_t/uint16x8_t operands, so dispatch keys on the unsigned types.  */
+#define vmullbq_poly_x(p1,p2,p3) __arm_vmullbq_poly_x(p1,p2,p3)
+#define __arm_vmullbq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmullbq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmullbq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
+
+/* Widening integer multiply (top halves); all six int types.  */
+#define vmulltq_int_x(p1,p2,p3) __arm_vmulltq_int_x(p1,p2,p3)
+#define __arm_vmulltq_int_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulltq_int_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulltq_int_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulltq_int_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_int_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_int_x_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulltq_int_x_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+/* Polynomial widening multiply (top halves); see vmullbq_poly_x above.  */
+#define vmulltq_poly_x(p1,p2,p3) __arm_vmulltq_poly_x(p1,p2,p3)
+#define __arm_vmulltq_poly_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulltq_poly_x_p8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulltq_poly_x_p16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3));})
+
+/* Multiply: vector-vector and vector-scalar (_n) forms for all six int
+   types.  NOTE(review): as with vaddq_x, the _n cases match only exact
+   fixed-width scalar typeids; untyped int literals may not select — verify.  */
+#define vmulq_x(p1,p2,p3) __arm_vmulq_x(p1,p2,p3)
+#define __arm_vmulq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vmulq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vmulq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vmulq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vmulq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vmulq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vmulq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vmulq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vmulq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vmulq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vmulq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vmulq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vmulq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3));})
+
+/* Negate: signed vector types only, matching the overloads mapped here.  */
+#define vnegq_x(p1,p2) __arm_vnegq_x(p1,p2)
+#define __arm_vnegq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vnegq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vnegq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vnegq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+/* Bitwise OR-NOT; vector-vector, all six int types.  */
+#define vornq_x(p1,p2,p3) __arm_vornq_x(p1,p2,p3)
+#define __arm_vornq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vornq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vornq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vornq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vornq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vornq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vornq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+/* Bitwise OR; vector-vector, all six int types.  */
+#define vorrq_x(p1,p2,p3) __arm_vorrq_x(p1,p2,p3)
+#define __arm_vorrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vorrq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vorrq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vorrq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vorrq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vorrq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vorrq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+/* Reverse within 32-bit containers: only 8/16-bit element types apply.  */
+#define vrev32q_x(p1,p2) __arm_vrev32q_x(p1,p2)
+#define __arm_vrev32q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev32q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev32q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev32q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev32q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2));})
+
+/* Reverse within 64-bit containers: all six integer element types.  */
+#define vrev64q_x(p1,p2) __arm_vrev64q_x(p1,p2)
+#define __arm_vrev64q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev64q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrev64q_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vrev64q_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev64q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrev64q_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrev64q_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+/* Absolute difference; vector-vector, all six int types.  */
+#define vabdq_x(p1,p2,p3) __arm_vabdq_x(p1,p2,p3)
+#define __arm_vabdq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vabdq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vabdq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vabdq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vabdq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vabdq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vabdq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+/* Bitwise AND; vector-vector, all six int types.  */
+#define vandq_x(p1,p2,p3) __arm_vandq_x(p1,p2,p3)
+#define __arm_vandq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vandq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vandq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vandq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vandq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vandq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vandq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+/* Bitwise AND-NOT (bit clear); vector-vector, all six int types.  */
+#define vbicq_x(p1,p2,p3) __arm_vbicq_x(p1,p2,p3)
+#define __arm_vbicq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vbicq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vbicq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vbicq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vbicq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vbicq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vbicq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+/* vbrsrq_x dispatches only on p1; the scalar p2 and the predicate p3 are
+   forwarded to the _n intrinsic unchanged.  */
+#define vbrsrq_x(p1,p2,p3) __arm_vbrsrq_x(p1,p2,p3)
+#define __arm_vbrsrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vbrsrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vbrsrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vbrsrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vbrsrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vbrsrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vbrsrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
#endif /* MVE Integer. */
+/* Predicated _x vmvnq (bitwise NOT); outside the MVE-Integer-only guard, so
+   available in both integer-only and floating-point MVE builds.  p2 is the
+   predicate.  */
+#define vmvnq_x(p1,p2) __arm_vmvnq_x(p1,p2)
+#define __arm_vmvnq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vmvnq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vmvnq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vmvnq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vmvnq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vmvnq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vmvnq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+/* Predicated _x vrev16q (byte reverse within halfwords): only 8-bit element
+   vectors are valid inputs.  */
+#define vrev16q_x(p1,p2) __arm_vrev16q_x(p1,p2)
+#define __arm_vrev16q_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrev16q_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrev16q_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2));})
+
+/* Predicated _x vrhaddq (rounding halving add) over matching vector pairs.  */
+#define vrhaddq_x(p1,p2,p3) __arm_vrhaddq_x(p1,p2,p3)
+#define __arm_vrhaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrhaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrhaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrhaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrhaddq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrhaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrhaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+/* Predicated _x vshlq (shift left by a signed shift vector): unsigned
+   operands shift by a *signed* vector of the same element width, matching
+   the non-predicated vshlq overloads.
+   FIX: the public wrapper previously declared four parameters
+   (p0,p1,p2,p3) while __arm_vshlq_x takes three, so every use of
+   vshlq_x failed to expand — three caller arguments are too few for the
+   wrapper, and four expand into too many for the implementation.  The
+   wrapper must mirror the three-argument implementation, like every
+   other _x macro in this group.  */
+#define vshlq_x(p1,p2,p3) __arm_vshlq_x(p1,p2,p3)
+#define __arm_vshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vshlq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+/* Predicated _x vrmulhq (rounding multiply returning high half).  */
+#define vrmulhq_x(p1,p2,p3) __arm_vrmulhq_x(p1,p2,p3)
+#define __arm_vrmulhq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrmulhq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrmulhq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrmulhq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vrmulhq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vrmulhq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vrmulhq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+/* Predicated _x vrshlq (rounding shift left): like vshlq_x, unsigned
+   operands take a signed shift vector of the same element width.  */
+#define vrshlq_x(p1,p2,p3) __arm_vrshlq_x(p1,p2,p3)
+#define __arm_vrshlq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_int8x16_t]: __arm_vrshlq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_int16x8_t]: __arm_vrshlq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_int32x4_t]: __arm_vrshlq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+/* Predicated _x vrshrq (rounding shift right by immediate p2); resolves to
+   the _n_ immediate forms.  */
+#define vrshrq_x(p1,p2,p3) __arm_vrshrq_x(p1,p2,p3)
+#define __arm_vrshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vrshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vrshrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vrshrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vrshrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vrshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vrshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+/* Predicated _x vshllbq (widening shift left, bottom halves): widening, so
+   only 8- and 16-bit element inputs are valid.  */
+#define vshllbq_x(p1,p2,p3) __arm_vshllbq_x(p1,p2,p3)
+#define __arm_vshllbq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshllbq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshllbq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshllbq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshllbq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
+
+/* Predicated _x vshlltq (widening shift left, top halves).  */
+#define vshlltq_x(p1,p2,p3) __arm_vshlltq_x(p1,p2,p3)
+#define __arm_vshlltq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlltq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlltq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlltq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlltq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3));})
+
+/* Predicated _x vshlq with immediate shift count p2 (distinct from
+   vector-shift vshlq_x).  */
+#define vshlq_x_n(p1,p2,p3) __arm_vshlq_x_n(p1,p2,p3)
+#define __arm_vshlq_x_n(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+/* Predicated _x decrementing-wrapping-dup family: p1 selects between the
+   immediate-start (_n_, uint32_t value) and write-back (_wb_, uint32_t *)
+   forms.  Only the start operand is polymorphic; p2..p4 pass through.  */
+#define vdwdupq_x_u8(p1,p2,p3,p4) __arm_vdwdupq_x_u8(p1,p2,p3,p4)
+#define __arm_vdwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_x_n_u8 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
+
+#define vdwdupq_x_u16(p1,p2,p3,p4) __arm_vdwdupq_x_u16(p1,p2,p3,p4)
+#define __arm_vdwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_x_n_u16 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
+
+#define vdwdupq_x_u32(p1,p2,p3,p4) __arm_vdwdupq_x_u32(p1,p2,p3,p4)
+#define __arm_vdwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_x_n_u32 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
+
+/* Predicated _x incrementing-wrapping-dup family; same _n_/_wb_ split.  */
+#define viwdupq_x_u8(p1,p2,p3,p4) __arm_viwdupq_x_u8(p1,p2,p3,p4)
+#define __arm_viwdupq_x_u8(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint32_t]: __arm_viwdupq_x_n_u8 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
+
+#define viwdupq_x_u16(p1,p2,p3,p4) __arm_viwdupq_x_u16(p1,p2,p3,p4)
+#define __arm_viwdupq_x_u16(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint32_t]: __arm_viwdupq_x_n_u16 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
+
+#define viwdupq_x_u32(p1,p2,p3,p4) __arm_viwdupq_x_u32(p1,p2,p3,p4)
+#define __arm_viwdupq_x_u32(p1,p2,p3,p4) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint32_t]: __arm_viwdupq_x_n_u32 (__ARM_mve_coerce(__p1, uint32_t), p2, p3, p4), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_viwdupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3, p4));})
+
+/* Predicated _x incrementing/decrementing dup (no wrap): three-argument
+   forms, same _n_/_wb_ dispatch on the start operand.  */
+#define vidupq_x_u8(p1,p2,p3) __arm_vidupq_x_u8(p1,p2,p3)
+#define __arm_vidupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint32_t]: __arm_vidupq_x_n_u8 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
+
+#define vddupq_x_u8(p1,p2,p3) __arm_vddupq_x_u8(p1,p2,p3)
+#define __arm_vddupq_x_u8(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint32_t]: __arm_vddupq_x_n_u8 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u8 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
+
+#define vidupq_x_u16(p1,p2,p3) __arm_vidupq_x_u16(p1,p2,p3)
+#define __arm_vidupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint32_t]: __arm_vidupq_x_n_u16 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
+
+#define vddupq_x_u16(p1,p2,p3) __arm_vddupq_x_u16(p1,p2,p3)
+#define __arm_vddupq_x_u16(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint32_t]: __arm_vddupq_x_n_u16 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u16 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
+
+#define vidupq_x_u32(p1,p2,p3) __arm_vidupq_x_u32(p1,p2,p3)
+#define __arm_vidupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint32_t]: __arm_vidupq_x_n_u32 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vidupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
+
+#define vddupq_x_u32(p1,p2,p3) __arm_vddupq_x_u32(p1,p2,p3)
+#define __arm_vddupq_x_u32(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint32_t]: __arm_vddupq_x_n_u32 (__ARM_mve_coerce(__p1, uint32_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vddupq_x_wb_u32 (__ARM_mve_coerce(__p1, uint32_t *), p2, p3));})
+
+/* Predicated _x vshrq (shift right by immediate p2); resolves to the _n_
+   immediate forms.  (Reordered: previously these two definitions were
+   interleaved between vhaddq_x's wrapper and its implementation; each
+   wrapper/implementation pair now sits together, matching the rest of the
+   file.  Macro definition order is irrelevant to expansion, so behavior is
+   unchanged.)  */
+#define vshrq_x(p1,p2,p3) __arm_vshrq_x(p1,p2,p3)
+#define __arm_vshrq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vshrq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vshrq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vshrq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshrq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshrq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2, p3), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshrq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2, p3));})
+
+/* Predicated _x vhaddq (halving add): p2 may be a scalar (maps to the _n_
+   forms) or a vector of matching element type.  */
+#define vhaddq_x(p1,p2,p3) __arm_vhaddq_x(p1,p2,p3)
+#define __arm_vhaddq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhaddq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhaddq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhaddq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhaddq_x_n_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhaddq_x_n_u16( __ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhaddq_x_n_u32( __ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhaddq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhaddq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhaddq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhaddq_x_u8( __ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhaddq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhaddq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+/* Predicated _x vhcaddq rotate-270 (halving complex add): signed element
+   types only.  */
+#define vhcaddq_rot270_x(p1,p2,p3) __arm_vhcaddq_rot270_x(p1,p2,p3)
+#define __arm_vhcaddq_rot270_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot270_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot270_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot270_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+/* Predicated _x vhcaddq rotate-90: signed element types only.  */
+#define vhcaddq_rot90_x(p1,p2,p3) __arm_vhcaddq_rot90_x(p1,p2,p3)
+#define __arm_vhcaddq_rot90_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhcaddq_rot90_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhcaddq_rot90_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhcaddq_rot90_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3));})
+
+/* Predicated _x vhsubq (halving subtract): p2 may be scalar (_n_ forms) or
+   a matching vector.  */
+#define vhsubq_x(p1,p2,p3) __arm_vhsubq_x(p1,p2,p3)
+#define __arm_vhsubq_x(p1,p2,p3) ({ __typeof(p1) __p1 = (p1); \
+ __typeof(p2) __p2 = (p2); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)][__ARM_mve_typeid(__p2)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vhsubq_x_n_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vhsubq_x_n_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vhsubq_x_n_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vhsubq_x_n_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]: __arm_vhsubq_x_n_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]: __arm_vhsubq_x_n_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32_t), p3), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vhsubq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), __ARM_mve_coerce(__p2, int8x16_t), p3), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vhsubq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), __ARM_mve_coerce(__p2, int16x8_t), p3), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vhsubq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), __ARM_mve_coerce(__p2, int32x4_t), p3), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]: __arm_vhsubq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), __ARM_mve_coerce(__p2, uint8x16_t), p3), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]: __arm_vhsubq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), __ARM_mve_coerce(__p2, uint16x8_t), p3), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vhsubq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), __ARM_mve_coerce(__p2, uint32x4_t), p3));})
+
+/* Predicated _x vclsq (count leading sign bits): signed types only.  */
+#define vclsq_x(p1,p2) __arm_vclsq_x(p1,p2)
+#define __arm_vclsq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vclsq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vclsq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vclsq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2));})
+
+/* Predicated _x vclzq (count leading zeros).  */
+#define vclzq_x(p1,p2) __arm_vclzq_x(p1,p2)
+#define __arm_vclzq_x(p1,p2) ({ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t]: __arm_vclzq_x_s8 (__ARM_mve_coerce(__p1, int8x16_t), p2), \
+ int (*)[__ARM_mve_type_int16x8_t]: __arm_vclzq_x_s16 (__ARM_mve_coerce(__p1, int16x8_t), p2), \
+ int (*)[__ARM_mve_type_int32x4_t]: __arm_vclzq_x_s32 (__ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint8x16_t]: __arm_vclzq_x_u8 (__ARM_mve_coerce(__p1, uint8x16_t), p2), \
+ int (*)[__ARM_mve_type_uint16x8_t]: __arm_vclzq_x_u16 (__ARM_mve_coerce(__p1, uint16x8_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t]: __arm_vclzq_x_u32 (__ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
+/* vadciq (add with carry-in, initializing FPSCR.C): not an _x variant, hence
+   the p0/p1/p2 parameter naming; 32-bit element vectors only.
+   NOTE(review): p2 looks like the carry-out pointer per ACLE — confirm
+   against the __arm_vadciq_s32/u32 prototypes.  */
+#define vadciq(p0,p1,p2) __arm_vadciq(p0,p1,p2)
+#define __arm_vadciq(p0,p1,p2) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vadciq_s32 (__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t), p2), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]: __arm_vadciq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1, uint32x4_t), p2));})
+
#define vstrdq_scatter_base_wb_p(p0,p1,p2,p3) __arm_vstrdq_scatter_base_wb_p(p0,p1,p2,p3)
#define __arm_vstrdq_scatter_base_wb_p(p0,p1,p2,p3) ({ __typeof(p2) __p2 = (p2); \
_Generic( (int (*)[__ARM_mve_typeid(__p2)])0, \
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 11b7afb..adf53b3 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,4 +1,382 @@
2020-03-20 Srinath Parvathaneni <srinath.parvathaneni@arm.com>
+
+ * gcc.target/arm/mve/intrinsics/vabdq_x_f16.c: New test.
+ * gcc.target/arm/mve/intrinsics/vabdq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabdq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabdq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabdq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabdq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabdq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabdq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabsq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabsq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabsq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabsq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vabsq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_x_n_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_x_n_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_x_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_x_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_x_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_x_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_x_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vaddq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vandq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vandq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vandq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vandq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vandq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vandq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vandq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vandq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbicq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbicq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbicq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbicq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbicq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbicq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbicq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbicq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vclsq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vclsq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vclsq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vclzq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vclzq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vclzq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vclzq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vclzq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vclzq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmulq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcmulq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtaq_x_s16_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtaq_x_s32_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtaq_x_u16_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtaq_x_u32_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtbq_x_f32_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtmq_x_s16_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtmq_x_s32_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtmq_x_u16_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtmq_x_u32_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtnq_x_s16_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtnq_x_s32_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtnq_x_u16_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtnq_x_u32_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtpq_x_s16_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtpq_x_s32_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtpq_x_u16_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtpq_x_u32_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_x_f16_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_x_f16_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_x_f32_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_x_f32_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_x_n_s16_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_x_n_s32_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_x_n_u16_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_x_n_u32_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_x_s16_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_x_s32_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_x_u16_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvtq_x_u32_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vcvttq_x_f32_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vddupq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vddupq_x_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vddupq_x_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vddupq_x_wb_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vddupq_x_wb_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vddupq_x_wb_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vdupq_x_n_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vdupq_x_n_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vdupq_x_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vdupq_x_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vdupq_x_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vdupq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vdupq_x_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vdupq_x_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/veorq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/veorq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/veorq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/veorq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/veorq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/veorq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/veorq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/veorq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_x_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhaddq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_x_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vhsubq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vidupq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vidupq_x_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vidupq_x_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vidupq_x_wb_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vidupq_x_wb_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vidupq_x_wb_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/viwdupq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/viwdupq_x_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/viwdupq_x_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxnmq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxnmq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmaxq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminnmq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminnmq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vminq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmovlbq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmovlbq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmovlbq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmovlbq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmovltq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmovltq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmovltq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmovltq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulhq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulhq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulhq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulhq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulhq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulhq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmullbq_int_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmullbq_int_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmullbq_int_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmullbq_int_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmullbq_int_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmullbq_int_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulltq_int_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulltq_int_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulltq_int_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulltq_int_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulltq_int_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulltq_int_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_x_n_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_x_n_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_x_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_x_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_x_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_x_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_x_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmulq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmvnq_x_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmvnq_x_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmvnq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmvnq_x_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmvnq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmvnq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmvnq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmvnq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmvnq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vmvnq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vnegq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vnegq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vnegq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vnegq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vnegq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vornq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vornq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vornq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vornq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vornq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vornq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vornq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vornq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vorrq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vorrq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vorrq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vorrq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vorrq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vorrq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vorrq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vorrq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrev16q_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrev16q_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrev32q_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrev32q_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrev32q_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrev32q_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrev32q_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrev64q_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrev64q_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrev64q_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrev64q_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrev64q_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrev64q_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrev64q_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrev64q_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrhaddq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrhaddq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrhaddq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrhaddq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrhaddq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrhaddq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrmulhq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrmulhq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrmulhq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrmulhq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrmulhq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrmulhq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrndaq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrndaq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrndmq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrndmq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrndnq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrndnq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrndpq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrndpq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrndq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrndq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrndxq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrndxq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshlq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshrq_x_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshrq_x_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshrq_x_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshrq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshrq_x_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vrshrq_x_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshllbq_x_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshllbq_x_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshllbq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshllbq_x_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlltq_x_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlltq_x_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlltq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlltq_x_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_x_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_x_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_x_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_x_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_x_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshlq_x_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshrq_x_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshrq_x_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshrq_x_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshrq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vshrq_x_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_x_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_x_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_x_n_f16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_x_n_f32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_x_n_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_x_n_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_x_n_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_x_n_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_x_n_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_x_n_u8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_x_s16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_x_s32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_x_s8.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_x_u16.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_x_u32.c: Likewise.
+ * gcc.target/arm/mve/intrinsics/vsubq_x_u8.c: Likewise.
+
+2020-03-20 Srinath Parvathaneni <srinath.parvathaneni@arm.com>
Andre Vieira <andre.simoesdiasvieira@arm.com>
Mihail Ionescu <mihail.ionescu@arm.com>
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_f16.c
new file mode 100644
index 0000000..2663db2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vabdq_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vabdt.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vabdq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_f32.c
new file mode 100644
index 0000000..0a5eb5c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vabdq_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vabdt.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vabdq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s16.c
new file mode 100644
index 0000000..3dd5e5d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vabdq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vabdt.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vabdq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vabdt.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s32.c
new file mode 100644
index 0000000..9b1f30f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vabdq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vabdt.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vabdq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s8.c
new file mode 100644
index 0000000..0e386b5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vabdq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vabdt.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vabdq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u16.c
new file mode 100644
index 0000000..ff7599a1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vabdq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vabdt.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vabdq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u32.c
new file mode 100644
index 0000000..d93c080
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vabdq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vabdt.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vabdq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u8.c
new file mode 100644
index 0000000..73f89e4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabdq_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vabdq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vabdt.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vabdq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_f16.c
new file mode 100644
index 0000000..4fb74a8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vabsq_x_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vabst.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, mve_pred16_t p)
+{
+ return vabsq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_f32.c
new file mode 100644
index 0000000..2bad3c1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vabsq_x_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vabst.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, mve_pred16_t p)
+{
+ return vabsq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s16.c
new file mode 100644
index 0000000..84c0a8a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vabsq_x_s16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vabst.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, mve_pred16_t p)
+{
+ return vabsq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s32.c
new file mode 100644
index 0000000..cf427a4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, mve_pred16_t p)
+{
+ return vabsq_x_s32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vabst.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, mve_pred16_t p)
+{
+ return vabsq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s8.c
new file mode 100644
index 0000000..a3fa974
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vabsq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, mve_pred16_t p)
+{
+ return vabsq_x_s8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vabst.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, mve_pred16_t p)
+{
+ return vabsq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_f16.c
new file mode 100644
index 0000000..ec3a85d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_f16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vaddq_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_f32.c
new file mode 100644
index 0000000..b9b216c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_f32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vaddq_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_f16.c
new file mode 100644
index 0000000..2e65cbf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_f16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16_t b, mve_pred16_t p)
+{
+ return vaddq_x_n_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+{
+ return vaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_f32.c
new file mode 100644
index 0000000..11243a6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_f32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32_t b, mve_pred16_t p)
+{
+ return vaddq_x_n_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+{
+ return vaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s16.c
new file mode 100644
index 0000000..0ef9afd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16_t b, mve_pred16_t p)
+{
+ return vaddq_x_n_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16_t b, mve_pred16_t p)
+{
+ return vaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s32.c
new file mode 100644
index 0000000..20b88a4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b, mve_pred16_t p)
+{
+ return vaddq_x_n_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+{
+ return vaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s8.c
new file mode 100644
index 0000000..2bc9072
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_s8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8_t b, mve_pred16_t p)
+{
+ return vaddq_x_n_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8_t b, mve_pred16_t p)
+{
+ return vaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u16.c
new file mode 100644
index 0000000..9e3ffb4c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16_t b, mve_pred16_t p)
+{
+ return vaddq_x_n_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p)
+{
+ return vaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u32.c
new file mode 100644
index 0000000..ab5140e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32_t b, mve_pred16_t p)
+{
+ return vaddq_x_n_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p)
+{
+ return vaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u8.c
new file mode 100644
index 0000000..0bdc00d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_n_u8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8_t b, mve_pred16_t p)
+{
+ return vaddq_x_n_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p)
+{
+ return vaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s16.c
new file mode 100644
index 0000000..c33c5cf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vaddq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s32.c
new file mode 100644
index 0000000..4581190
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vaddq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s8.c
new file mode 100644
index 0000000..1aa65c9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_s8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vaddq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u16.c
new file mode 100644
index 0000000..eef9acc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vaddq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u32.c
new file mode 100644
index 0000000..e8e430b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vaddq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u8.c
new file mode 100644
index 0000000..1b77145
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_x_u8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vaddq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vaddt.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_f16.c
new file mode 100644
index 0000000..357e4b3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vandq_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vandt" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vandq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_f32.c
new file mode 100644
index 0000000..0e6cdf6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vandq_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vandt" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vandq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s16.c
new file mode 100644
index 0000000..c18a19e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vandq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vandt" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vandq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s32.c
new file mode 100644
index 0000000..3509dc7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vandq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vandt" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vandq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s8.c
new file mode 100644
index 0000000..a4f0eec
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vandq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vandt" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vandq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u16.c
new file mode 100644
index 0000000..e85c5bd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vandq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vandt" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vandq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u32.c
new file mode 100644
index 0000000..89430be
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vandq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vandt" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vandq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u8.c
new file mode 100644
index 0000000..ab074c2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vandq_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vandq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vandt" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vandq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_f16.c
new file mode 100644
index 0000000..12036af
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vbicq_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbict" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vbicq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_f32.c
new file mode 100644
index 0000000..784653f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vbicq_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbict" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vbicq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s16.c
new file mode 100644
index 0000000..b0b34ae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vbicq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbict" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vbicq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s32.c
new file mode 100644
index 0000000..c023cfa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vbicq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbict" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vbicq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s8.c
new file mode 100644
index 0000000..3355dc6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vbicq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbict" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vbicq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u16.c
new file mode 100644
index 0000000..abdcf00
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vbicq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbict" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vbicq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u32.c
new file mode 100644
index 0000000..2aa34c5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vbicq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbict" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vbicq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u8.c
new file mode 100644
index 0000000..26c9e2c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbicq_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vbicq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbict" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vbicq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f16.c
new file mode 100644
index 0000000..282837a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, int32_t b, mve_pred16_t p)
+{
+ return vbrsrq_x_n_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbrsrt.16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, int32_t b, mve_pred16_t p)
+{
+ return vbrsrq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbrsrt.16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f32.c
new file mode 100644
index 0000000..74ef4e6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_f32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, int32_t b, mve_pred16_t p)
+{
+ return vbrsrq_x_n_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbrsrt.32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, int32_t b, mve_pred16_t p)
+{
+ return vbrsrq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbrsrt.32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s16.c
new file mode 100644
index 0000000..7f9dc4e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int32_t b, mve_pred16_t p)
+{
+ return vbrsrq_x_n_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbrsrt.16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int32_t b, mve_pred16_t p)
+{
+ return vbrsrq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbrsrt.16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s32.c
new file mode 100644
index 0000000..4a4ffde
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b, mve_pred16_t p)
+{
+ return vbrsrq_x_n_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbrsrt.32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+{
+ return vbrsrq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbrsrt.32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s8.c
new file mode 100644
index 0000000..db1f6ab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_s8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int32_t b, mve_pred16_t p)
+{
+ return vbrsrq_x_n_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbrsrt.8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int32_t b, mve_pred16_t p)
+{
+ return vbrsrq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbrsrt.8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u16.c
new file mode 100644
index 0000000..1bce070
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, int32_t b, mve_pred16_t p)
+{
+ return vbrsrq_x_n_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbrsrt.16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, int32_t b, mve_pred16_t p)
+{
+ return vbrsrq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbrsrt.16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u32.c
new file mode 100644
index 0000000..6e059c7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, int32_t b, mve_pred16_t p)
+{
+ return vbrsrq_x_n_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbrsrt.32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, int32_t b, mve_pred16_t p)
+{
+ return vbrsrq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbrsrt.32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u8.c
new file mode 100644
index 0000000..aed3136
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vbrsrq_x_n_u8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, int32_t b, mve_pred16_t p)
+{
+ return vbrsrq_x_n_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbrsrt.8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, int32_t b, mve_pred16_t p)
+{
+ return vbrsrq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vbrsrt.8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f16.c
new file mode 100644
index 0000000..60a96ad
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vcaddq_rot270_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcaddt.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vcaddq_rot270_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f32.c
new file mode 100644
index 0000000..b79e7d0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vcaddq_rot270_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcaddt.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vcaddq_rot270_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s16.c
new file mode 100644
index 0000000..efc5820
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vcaddq_rot270_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcaddt.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vcaddq_rot270_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s32.c
new file mode 100644
index 0000000..25cf2ef
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vcaddq_rot270_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcaddt.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vcaddq_rot270_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s8.c
new file mode 100644
index 0000000..8dbdf13
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vcaddq_rot270_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcaddt.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vcaddq_rot270_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u16.c
new file mode 100644
index 0000000..13d9aa0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vcaddq_rot270_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcaddt.i16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vcaddq_rot270_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u32.c
new file mode 100644
index 0000000..887133e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vcaddq_rot270_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcaddt.i32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vcaddq_rot270_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u8.c
new file mode 100644
index 0000000..e23cbab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot270_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vcaddq_rot270_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcaddt.i8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vcaddq_rot270_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f16.c
new file mode 100644
index 0000000..2c96c2a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vcaddq_rot90_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcaddt.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vcaddq_rot90_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f32.c
new file mode 100644
index 0000000..6ed2793
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vcaddq_rot90_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcaddt.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vcaddq_rot90_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s16.c
new file mode 100644
index 0000000..9983bae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vcaddq_rot90_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcaddt.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vcaddq_rot90_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s32.c
new file mode 100644
index 0000000..658763c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vcaddq_rot90_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcaddt.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vcaddq_rot90_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s8.c
new file mode 100644
index 0000000..a63dbe6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vcaddq_rot90_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcaddt.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vcaddq_rot90_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u16.c
new file mode 100644
index 0000000..e256cba
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vcaddq_rot90_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcaddt.i16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vcaddq_rot90_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u32.c
new file mode 100644
index 0000000..9361a98
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vcaddq_rot90_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcaddt.i32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vcaddq_rot90_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u8.c
new file mode 100644
index 0000000..0664c6e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcaddq_rot90_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vcaddq_rot90_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcaddt.i8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vcaddq_rot90_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s16.c
new file mode 100644
index 0000000..e956fe2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vclsq_x_s16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vclst.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, mve_pred16_t p)
+{
+ return vclsq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s32.c
new file mode 100644
index 0000000..cc00379
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, mve_pred16_t p)
+{
+ return vclsq_x_s32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vclst.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, mve_pred16_t p)
+{
+ return vclsq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s8.c
new file mode 100644
index 0000000..262e04c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclsq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, mve_pred16_t p)
+{
+ return vclsq_x_s8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vclst.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, mve_pred16_t p)
+{
+ return vclsq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s16.c
new file mode 100644
index 0000000..782d385
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vclzq_x_s16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vclzt.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, mve_pred16_t p)
+{
+ return vclzq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s32.c
new file mode 100644
index 0000000..cb96176
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, mve_pred16_t p)
+{
+ return vclzq_x_s32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vclzt.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, mve_pred16_t p)
+{
+ return vclzq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s8.c
new file mode 100644
index 0000000..ee7ab8f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, mve_pred16_t p)
+{
+ return vclzq_x_s8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vclzt.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, mve_pred16_t p)
+{
+ return vclzq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u16.c
new file mode 100644
index 0000000..d4074ab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, mve_pred16_t p)
+{
+ return vclzq_x_u16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vclzt.i16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, mve_pred16_t p)
+{
+ return vclzq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u32.c
new file mode 100644
index 0000000..9711269
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, mve_pred16_t p)
+{
+ return vclzq_x_u32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vclzt.i32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, mve_pred16_t p)
+{
+ return vclzq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u8.c
new file mode 100644
index 0000000..baa8eddc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vclzq_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, mve_pred16_t p)
+{
+ return vclzq_x_u8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vclzt.i8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, mve_pred16_t p)
+{
+ return vclzq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f16.c
new file mode 100644
index 0000000..caed477
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vcmulq_rot180_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcmult.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vcmulq_rot180_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f32.c
new file mode 100644
index 0000000..21814ad
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot180_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vcmulq_rot180_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcmult.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vcmulq_rot180_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f16.c
new file mode 100644
index 0000000..14f9391
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vcmulq_rot270_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcmult.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vcmulq_rot270_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f32.c
new file mode 100644
index 0000000..af20dfc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot270_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vcmulq_rot270_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcmult.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vcmulq_rot270_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f16.c
new file mode 100644
index 0000000..a7fd380
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vcmulq_rot90_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcmult.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vcmulq_rot90_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcmult.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f32.c
new file mode 100644
index 0000000..6730cd7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_rot90_x_f32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vcmulq_rot90_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcmult.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vcmulq_rot90_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcmult.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_x_f16.c
new file mode 100644
index 0000000..18d3217
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vcmulq_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcmult.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vcmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_x_f32.c
new file mode 100644
index 0000000..7162f49
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcmulq_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vcmulq_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcmult.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vcmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_s16_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_s16_f16.c
new file mode 100644
index 0000000..ed4a0ed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_s16_f16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vcvtaq_x_s16_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtat.s16.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_s32_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_s32_f32.c
new file mode 100644
index 0000000..be969ca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_s32_f32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vcvtaq_x_s32_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtat.s32.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_u16_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_u16_f16.c
new file mode 100644
index 0000000..93038e9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_u16_f16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vcvtaq_x_u16_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtat.u16.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_u32_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_u32_f32.c
new file mode 100644
index 0000000..421f238
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtaq_x_u32_f32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vcvtaq_x_u32_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtat.u32.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtbq_x_f32_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtbq_x_f32_f16.c
new file mode 100644
index 0000000..98e15cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtbq_x_f32_f16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vcvtbq_x_f32_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtbt.f32.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_s16_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_s16_f16.c
new file mode 100644
index 0000000..b017c0e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_s16_f16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vcvtmq_x_s16_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtmt.s16.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_s32_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_s32_f32.c
new file mode 100644
index 0000000..8e5e73c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_s32_f32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vcvtmq_x_s32_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtmt.s32.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_u16_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_u16_f16.c
new file mode 100644
index 0000000..a747745
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_u16_f16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vcvtmq_x_u16_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtmt.u16.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_u32_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_u32_f32.c
new file mode 100644
index 0000000..7863427
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtmq_x_u32_f32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vcvtmq_x_u32_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtmt.u32.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_s16_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_s16_f16.c
new file mode 100644
index 0000000..b2b7230
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_s16_f16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vcvtnq_x_s16_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtnt.s16.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_s32_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_s32_f32.c
new file mode 100644
index 0000000..e9f2fad
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_s32_f32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vcvtnq_x_s32_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtnt.s32.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_u16_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_u16_f16.c
new file mode 100644
index 0000000..0556618
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_u16_f16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vcvtnq_x_u16_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtnt.u16.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_u32_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_u32_f32.c
new file mode 100644
index 0000000..967bfb8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtnq_x_u32_f32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vcvtnq_x_u32_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtnt.u32.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_s16_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_s16_f16.c
new file mode 100644
index 0000000..7ffe464
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_s16_f16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vcvtpq_x_s16_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtpt.s16.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_s32_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_s32_f32.c
new file mode 100644
index 0000000..0fe3380
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_s32_f32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vcvtpq_x_s32_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtpt.s32.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_u16_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_u16_f16.c
new file mode 100644
index 0000000..77fd34c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_u16_f16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vcvtpq_x_u16_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtpt.u16.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_u32_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_u32_f32.c
new file mode 100644
index 0000000..369c941
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtpq_x_u32_f32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vcvtpq_x_u32_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtpt.u32.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f16_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f16_s16.c
new file mode 100644
index 0000000..3c97d6e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f16_s16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vcvtq_x_f16_s16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f16.s16" } } */
+
+float16x8_t
+foo1 (int16x8_t a, mve_pred16_t p)
+{
+ return vcvtq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f16.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f16_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f16_u16.c
new file mode 100644
index 0000000..14ca8c7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f16_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (uint16x8_t a, mve_pred16_t p)
+{
+ return vcvtq_x_f16_u16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f16.u16" } } */
+
+float16x8_t
+foo1 (uint16x8_t a, mve_pred16_t p)
+{
+ return vcvtq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f16.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f32_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f32_s32.c
new file mode 100644
index 0000000..b851d91
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f32_s32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (int32x4_t a, mve_pred16_t p)
+{
+ return vcvtq_x_f32_s32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f32.s32" } } */
+
+float32x4_t
+foo1 (int32x4_t a, mve_pred16_t p)
+{
+ return vcvtq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f32.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f32_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f32_u32.c
new file mode 100644
index 0000000..4c44257
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_f32_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (uint32x4_t a, mve_pred16_t p)
+{
+ return vcvtq_x_f32_u32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f32.u32" } } */
+
+float32x4_t
+foo1 (uint32x4_t a, mve_pred16_t p)
+{
+ return vcvtq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f32.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_s16.c
new file mode 100644
index 0000000..63d6cc6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_s16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vcvtq_x_n_f16_s16 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f16.s16" } } */
+
+float16x8_t
+foo1 (int16x8_t a, mve_pred16_t p)
+{
+ return vcvtq_x_n (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f16.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_u16.c
new file mode 100644
index 0000000..13cf17d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f16_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (uint16x8_t a, mve_pred16_t p)
+{
+ return vcvtq_x_n_f16_u16 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f16.u16" } } */
+
+float16x8_t
+foo1 (uint16x8_t a, mve_pred16_t p)
+{
+ return vcvtq_x_n (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f16.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_s32.c
new file mode 100644
index 0000000..630d3bf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_s32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (int32x4_t a, mve_pred16_t p)
+{
+ return vcvtq_x_n_f32_s32 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f32.s32" } } */
+
+float32x4_t
+foo1 (int32x4_t a, mve_pred16_t p)
+{
+ return vcvtq_x_n (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f32.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_u32.c
new file mode 100644
index 0000000..5d64fec
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_f32_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (uint32x4_t a, mve_pred16_t p)
+{
+ return vcvtq_x_n_f32_u32 (a, 16, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f32.u32" } } */
+
+float32x4_t
+foo1 (uint32x4_t a, mve_pred16_t p)
+{
+ return vcvtq_x_n (a, 16, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.f32.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_s16_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_s16_f16.c
new file mode 100644
index 0000000..1b053fd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_s16_f16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vcvtq_x_n_s16_f16 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.s16.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_s32_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_s32_f32.c
new file mode 100644
index 0000000..2cc8f74
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_s32_f32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vcvtq_x_n_s32_f32 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.s32.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_u16_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_u16_f16.c
new file mode 100644
index 0000000..a555791
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_u16_f16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vcvtq_x_n_u16_f16 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.u16.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_u32_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_u32_f32.c
new file mode 100644
index 0000000..5a8c22d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_n_u32_f32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vcvtq_x_n_u32_f32 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.u32.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_s16_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_s16_f16.c
new file mode 100644
index 0000000..de29fce
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_s16_f16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vcvtq_x_s16_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.s16.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_s32_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_s32_f32.c
new file mode 100644
index 0000000..312254e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_s32_f32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vcvtq_x_s32_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.s32.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_u16_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_u16_f16.c
new file mode 100644
index 0000000..de6305e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_u16_f16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vcvtq_x_u16_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.u16.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_u32_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_u32_f32.c
new file mode 100644
index 0000000..1e1ab55
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvtq_x_u32_f32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vcvtq_x_u32_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvtt.u32.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvttq_x_f32_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvttq_x_f32_f16.c
new file mode 100644
index 0000000..14090127
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vcvttq_x_f32_f16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vcvttq_x_f32_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vcvttt.f32.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u16.c
new file mode 100644
index 0000000..e110510
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint32_t a, mve_pred16_t p)
+{
+ return vddupq_x_n_u16 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vddupt.u16" } } */
+
+uint16x8_t
+foo1 (uint32_t a, mve_pred16_t p)
+{
+ return vddupq_x_u16 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vddupt.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u32.c
new file mode 100644
index 0000000..5915ab8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32_t a, mve_pred16_t p)
+{
+ return vddupq_x_n_u32 (a, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vddupt.u32" } } */
+
+uint32x4_t
+foo1 (uint32_t a, mve_pred16_t p)
+{
+ return vddupq_x_u32 (a, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vddupt.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u8.c
new file mode 100644
index 0000000..7973f45
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_n_u8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint32_t a, mve_pred16_t p)
+{
+ return vddupq_x_n_u8 (a, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vddupt.u8" } } */
+
+uint8x16_t
+foo1 (uint32_t a, mve_pred16_t p)
+{
+ return vddupq_x_u8 (a, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vddupt.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u16.c
new file mode 100644
index 0000000..348cd58
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u16.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t *a;
+
+uint16x8_t
+foo (mve_pred16_t p)
+{
+ return vddupq_x_wb_u16 (a, 2, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vddupt.u16" } } */
+
+uint16x8_t
+foo1 (mve_pred16_t p)
+{
+ return vddupq_x_u16 (a, 2, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vddupt.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u32.c
new file mode 100644
index 0000000..c8d664d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u32.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t *a;
+
+uint32x4_t
+foo (mve_pred16_t p)
+{
+ return vddupq_x_wb_u32 (a, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vddupt.u32" } } */
+
+uint32x4_t
+foo1 (mve_pred16_t p)
+{
+ return vddupq_x_u32 (a, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vddupt.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u8.c
new file mode 100644
index 0000000..74fea79
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vddupq_x_wb_u8.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t *a;
+
+uint8x16_t
+foo (mve_pred16_t p)
+{
+ return vddupq_x_wb_u8 (a, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vddupt.u8" } } */
+
+uint8x16_t
+foo1 (mve_pred16_t p)
+{
+ return vddupq_x_u8 (a, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vddupt.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_f16.c
new file mode 100644
index 0000000..b738c2b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_f16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16_t a, mve_pred16_t p)
+{
+ return vdupq_x_n_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdupt.16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_f32.c
new file mode 100644
index 0000000..2c7e49c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_f32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32_t a, mve_pred16_t p)
+{
+ return vdupq_x_n_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdupt.32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s16.c
new file mode 100644
index 0000000..43cda91
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16_t a, mve_pred16_t p)
+{
+ return vdupq_x_n_s16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdupt.16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s32.c
new file mode 100644
index 0000000..b57fe6b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32_t a, mve_pred16_t p)
+{
+ return vdupq_x_n_s32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdupt.32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s8.c
new file mode 100644
index 0000000..89c13c9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_s8.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8_t a, mve_pred16_t p)
+{
+ return vdupq_x_n_s8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdupt.8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u16.c
new file mode 100644
index 0000000..edadfca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16_t a, mve_pred16_t p)
+{
+ return vdupq_x_n_u16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdupt.16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u32.c
new file mode 100644
index 0000000..a4b4874
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32_t a, mve_pred16_t p)
+{
+ return vdupq_x_n_u32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdupt.32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u8.c
new file mode 100644
index 0000000..cfbf97d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdupq_x_n_u8.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8_t a, mve_pred16_t p)
+{
+ return vdupq_x_n_u8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdupt.8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u16.c
new file mode 100644
index 0000000..9969ae3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint32_t a, uint32_t b, mve_pred16_t p)
+{
+ return vdwdupq_x_n_u16 (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdwdupt.u16" } } */
+
+uint16x8_t
+foo1 (uint32_t a, uint32_t b, mve_pred16_t p)
+{
+ return vdwdupq_x_u16 (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdwdupt.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u32.c
new file mode 100644
index 0000000..ae56387
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32_t a, uint32_t b, mve_pred16_t p)
+{
+ return vdwdupq_x_n_u32 (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdwdupt.u32" } } */
+
+uint32x4_t
+foo1 (uint32_t a, uint32_t b, mve_pred16_t p)
+{
+ return vdwdupq_x_u32 (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdwdupt.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u8.c
new file mode 100644
index 0000000..ee29159
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_n_u8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint32_t a, uint32_t b, mve_pred16_t p)
+{
+ return vdwdupq_x_n_u8 (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdwdupt.u8" } } */
+
+uint8x16_t
+foo1 (uint32_t a, uint32_t b, mve_pred16_t p)
+{
+ return vdwdupq_x_u8 (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdwdupt.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u16.c
new file mode 100644
index 0000000..4e3c1bf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint32_t * a, uint32_t b, mve_pred16_t p)
+{
+ return vdwdupq_x_wb_u16 (a, b, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdwdupt.u16" } } */
+
+uint16x8_t
+foo1 (uint32_t * a, uint32_t b, mve_pred16_t p)
+{
+ return vdwdupq_x_u16 (a, b, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdwdupt.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u32.c
new file mode 100644
index 0000000..0988b07
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32_t * a, uint32_t b, mve_pred16_t p)
+{
+ return vdwdupq_x_wb_u32 (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdwdupt.u32" } } */
+
+uint32x4_t
+foo1 (uint32_t * a, uint32_t b, mve_pred16_t p)
+{
+ return vdwdupq_x_u32 (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdwdupt.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u8.c
new file mode 100644
index 0000000..5708947
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vdwdupq_x_wb_u8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint32_t * a, uint32_t b, mve_pred16_t p)
+{
+ return vdwdupq_x_wb_u8 (a, b, 2, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdwdupt.u8" } } */
+
+uint8x16_t
+foo1 (uint32_t * a, uint32_t b, mve_pred16_t p)
+{
+ return vdwdupq_x_u8 (a, b, 2, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vdwdupt.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_f16.c
new file mode 100644
index 0000000..bbabc07
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_f16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return veorq_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "veort" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return veorq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "veort" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_f32.c
new file mode 100644
index 0000000..577df0e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_f32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return veorq_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "veort" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return veorq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "veort" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s16.c
new file mode 100644
index 0000000..88ba5f1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return veorq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "veort" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return veorq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "veort" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s32.c
new file mode 100644
index 0000000..bea7870
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return veorq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "veort" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return veorq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "veort" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s8.c
new file mode 100644
index 0000000..23bb971
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_s8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return veorq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "veort" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return veorq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "veort" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u16.c
new file mode 100644
index 0000000..30f5268
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return veorq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "veort" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return veorq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "veort" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u32.c
new file mode 100644
index 0000000..9122574
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return veorq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "veort" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return veorq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "veort" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u8.c
new file mode 100644
index 0000000..c5babd2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/veorq_x_u8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return veorq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "veort" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return veorq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "veort" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_n_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_n_f16.c
index 09c927a..3af3d61 100644
--- a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_n_f16.c
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_n_f16.c
@@ -16,7 +16,7 @@ foo (float16x8_t a, float16x8_t b, float16_t c)
float16x8_t
foo1 (float16x8_t a, float16x8_t b, float16_t c)
{
- return vfmaq_n (a, b, c);
+ return vfmaq (a, b, c);
}
/* { dg-final { scan-assembler "vfma.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_n_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_n_f32.c
index ad3b7c4..b6a9205 100644
--- a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_n_f32.c
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmaq_n_f32.c
@@ -16,7 +16,7 @@ foo (float32x4_t a, float32x4_t b, float32_t c)
float32x4_t
foo1 (float32x4_t a, float32x4_t b, float32_t c)
{
- return vfmaq_n (a, b, c);
+ return vfmaq (a, b, c);
}
/* { dg-final { scan-assembler "vfma.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_n_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_n_f16.c
index 30e797e..358faf6 100644
--- a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_n_f16.c
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_n_f16.c
@@ -16,7 +16,7 @@ foo (float16x8_t a, float16x8_t b, float16_t c)
float16x8_t
foo1 (float16x8_t a, float16x8_t b, float16_t c)
{
- return vfmasq_n (a, b, c);
+ return vfmasq (a, b, c);
}
/* { dg-final { scan-assembler "vfmas.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_n_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_n_f32.c
index 14a45a6..02ca854 100644
--- a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_n_f32.c
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vfmasq_n_f32.c
@@ -16,7 +16,7 @@ foo (float32x4_t a, float32x4_t b, float32_t c)
float32x4_t
foo1 (float32x4_t a, float32x4_t b, float32_t c)
{
- return vfmasq_n (a, b, c);
+ return vfmasq (a, b, c);
}
/* { dg-final { scan-assembler "vfmas.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s16.c
new file mode 100644
index 0000000..3e27bf1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16_t b, mve_pred16_t p)
+{
+ return vhaddq_x_n_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16_t b, mve_pred16_t p)
+{
+ return vhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s32.c
new file mode 100644
index 0000000..8ae4f7e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b, mve_pred16_t p)
+{
+ return vhaddq_x_n_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+{
+ return vhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s8.c
new file mode 100644
index 0000000..0c46b82
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_s8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8_t b, mve_pred16_t p)
+{
+ return vhaddq_x_n_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8_t b, mve_pred16_t p)
+{
+ return vhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u16.c
new file mode 100644
index 0000000..c4814d3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16_t b, mve_pred16_t p)
+{
+ return vhaddq_x_n_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p)
+{
+ return vhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u32.c
new file mode 100644
index 0000000..f8658e2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32_t b, mve_pred16_t p)
+{
+ return vhaddq_x_n_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p)
+{
+ return vhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u8.c
new file mode 100644
index 0000000..b69445b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_n_u8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8_t b, mve_pred16_t p)
+{
+ return vhaddq_x_n_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p)
+{
+ return vhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s16.c
new file mode 100644
index 0000000..1c54a33
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vhaddq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s32.c
new file mode 100644
index 0000000..9fd4305
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vhaddq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s8.c
new file mode 100644
index 0000000..69f14a5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vhaddq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u16.c
new file mode 100644
index 0000000..010bfc5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vhaddq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u32.c
new file mode 100644
index 0000000..a593126
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vhaddq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u8.c
new file mode 100644
index 0000000..28784b3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhaddq_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vhaddq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhaddt.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s16.c
new file mode 100644
index 0000000..bfb3549
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vhcaddq_rot270_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhcaddt.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vhcaddq_rot270_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s32.c
new file mode 100644
index 0000000..cbbcd41
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vhcaddq_rot270_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhcaddt.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vhcaddq_rot270_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s8.c
new file mode 100644
index 0000000..0a06cb9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot270_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vhcaddq_rot270_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhcaddt.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vhcaddq_rot270_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s16.c
new file mode 100644
index 0000000..d7807cc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vhcaddq_rot90_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhcaddt.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vhcaddq_rot90_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s32.c
new file mode 100644
index 0000000..a55e53c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vhcaddq_rot90_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhcaddt.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vhcaddq_rot90_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s8.c
new file mode 100644
index 0000000..438c9da
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhcaddq_rot90_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vhcaddq_rot90_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhcaddt.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vhcaddq_rot90_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s16.c
new file mode 100644
index 0000000..d6d0458
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16_t b, mve_pred16_t p)
+{
+ return vhsubq_x_n_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16_t b, mve_pred16_t p)
+{
+ return vhsubq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s32.c
new file mode 100644
index 0000000..ff949c8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b, mve_pred16_t p)
+{
+ return vhsubq_x_n_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+{
+ return vhsubq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s8.c
new file mode 100644
index 0000000..c322cd3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_s8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8_t b, mve_pred16_t p)
+{
+ return vhsubq_x_n_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8_t b, mve_pred16_t p)
+{
+ return vhsubq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u16.c
new file mode 100644
index 0000000..d43080b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16_t b, mve_pred16_t p)
+{
+ return vhsubq_x_n_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p)
+{
+ return vhsubq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u32.c
new file mode 100644
index 0000000..c814784
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32_t b, mve_pred16_t p)
+{
+ return vhsubq_x_n_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p)
+{
+ return vhsubq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u8.c
new file mode 100644
index 0000000..b2597470
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_n_u8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8_t b, mve_pred16_t p)
+{
+ return vhsubq_x_n_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p)
+{
+ return vhsubq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s16.c
new file mode 100644
index 0000000..fff5b54
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vhsubq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vhsubq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s32.c
new file mode 100644
index 0000000..811548f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vhsubq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vhsubq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s8.c
new file mode 100644
index 0000000..a8e05be
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vhsubq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vhsubq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u16.c
new file mode 100644
index 0000000..c01cfe0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vhsubq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vhsubq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u32.c
new file mode 100644
index 0000000..21d5a1e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vhsubq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vhsubq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u8.c
new file mode 100644
index 0000000..51811e2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vhsubq_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vhsubq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vhsubt.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vhsubq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u16.c
new file mode 100644
index 0000000..f1bd060
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint32_t a, mve_pred16_t p)
+{
+ return vidupq_x_n_u16 (a, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vidupt.u16" } } */
+
+uint16x8_t
+foo1 (uint32_t a, mve_pred16_t p)
+{
+ return vidupq_x_u16 (a, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vidupt.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u32.c
new file mode 100644
index 0000000..497f932
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32_t a, mve_pred16_t p)
+{
+ return vidupq_x_n_u32 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vidupt.u32" } } */
+
+uint32x4_t
+foo1 (uint32_t a, mve_pred16_t p)
+{
+ return vidupq_x_u32 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vidupt.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u8.c
new file mode 100644
index 0000000..eafe983
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_n_u8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint32_t a, mve_pred16_t p)
+{
+ return vidupq_x_n_u8 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vidupt.u8" } } */
+
+uint8x16_t
+foo1 (uint32_t a, mve_pred16_t p)
+{
+ return vidupq_x_u8 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vidupt.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u16.c
new file mode 100644
index 0000000..97dc350
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u16.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t *a;
+
+uint16x8_t
+foo (mve_pred16_t p)
+{
+ return vidupq_x_wb_u16 (a, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vidupt.u16" } } */
+
+uint16x8_t
+foo1 (mve_pred16_t p)
+{
+ return vidupq_x_u16 (a, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vidupt.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u32.c
new file mode 100644
index 0000000..b21be73
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u32.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t *a;
+
+uint32x4_t
+foo (mve_pred16_t p)
+{
+ return vidupq_x_wb_u32 (a, 2, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vidupt.u32" } } */
+
+uint32x4_t
+foo1 (mve_pred16_t p)
+{
+ return vidupq_x_u32 (a, 2, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vidupt.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u8.c
new file mode 100644
index 0000000..239d04e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vidupq_x_wb_u8.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32_t * a;
+
+uint8x16_t
+foo (mve_pred16_t p)
+{
+ return vidupq_x_wb_u8 (a, 2, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vidupt.u8" } } */
+
+uint8x16_t
+foo1 (mve_pred16_t p)
+{
+ return vidupq_x_u8 (a, 2, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vidupt.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u16.c
new file mode 100644
index 0000000..2503927
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint32_t a, uint32_t b, mve_pred16_t p)
+{
+ return viwdupq_x_n_u16 (a, b, 2, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "viwdupt.u16" } } */
+
+uint16x8_t
+foo1 (uint32_t a, uint32_t b, mve_pred16_t p)
+{
+ return viwdupq_x_u16 (a, b, 2, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "viwdupt.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u32.c
new file mode 100644
index 0000000..26d0e09
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32_t a, uint32_t b, mve_pred16_t p)
+{
+ return viwdupq_x_n_u32 (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "viwdupt.u32" } } */
+
+uint32x4_t
+foo1 (uint32_t a, uint32_t b, mve_pred16_t p)
+{
+ return viwdupq_x_u32 (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "viwdupt.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u8.c
new file mode 100644
index 0000000..97f47a4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_n_u8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint32_t a, uint32_t b, mve_pred16_t p)
+{
+ return viwdupq_x_n_u8 (a, b, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "viwdupt.u8" } } */
+
+uint8x16_t
+foo1 (uint32_t a, uint32_t b, mve_pred16_t p)
+{
+ return viwdupq_x_u8 (a, b, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "viwdupt.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u16.c
new file mode 100644
index 0000000..bbb2fa4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint32_t * a, uint32_t b, mve_pred16_t p)
+{
+ return viwdupq_x_wb_u16 (a, b, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "viwdupt.u16" } } */
+
+uint16x8_t
+foo1 (uint32_t * a, uint32_t b, mve_pred16_t p)
+{
+ return viwdupq_x_u16 (a, b, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "viwdupt.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u32.c
new file mode 100644
index 0000000..8b9a2c7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32_t * a, uint32_t b, mve_pred16_t p)
+{
+ return viwdupq_x_wb_u32 (a, b, 2, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "viwdupt.u32" } } */
+
+uint32x4_t
+foo1 (uint32_t * a, uint32_t b, mve_pred16_t p)
+{
+ return viwdupq_x_u32 (a, b, 2, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "viwdupt.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u8.c
new file mode 100644
index 0000000..012343b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/viwdupq_x_wb_u8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint32_t * a, uint32_t b, mve_pred16_t p)
+{
+ return viwdupq_x_wb_u8 (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "viwdupt.u8" } } */
+
+uint8x16_t
+foo1 (uint32_t * a, uint32_t b, mve_pred16_t p)
+{
+ return viwdupq_x_u8 (a, b, 4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "viwdupt.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_x_f16.c
new file mode 100644
index 0000000..5ffedb8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vmaxnmq_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmaxnmt.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vmaxnmq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_x_f32.c
new file mode 100644
index 0000000..223681a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxnmq_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vmaxnmq_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmaxnmt.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vmaxnmq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s16.c
new file mode 100644
index 0000000..caba15a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vmaxq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmaxt.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vmaxq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s32.c
new file mode 100644
index 0000000..24e0edb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vmaxq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmaxt.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vmaxq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s8.c
new file mode 100644
index 0000000..12e99b97
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vmaxq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmaxt.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vmaxq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u16.c
new file mode 100644
index 0000000..4d158f8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vmaxq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmaxt.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vmaxq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u32.c
new file mode 100644
index 0000000..2a28b3d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vmaxq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmaxt.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vmaxq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u8.c
new file mode 100644
index 0000000..6a8bc6b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmaxq_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vmaxq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmaxt.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vmaxq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_x_f16.c
new file mode 100644
index 0000000..59ef96f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vminnmq_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vminnmt.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vminnmq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_x_f32.c
new file mode 100644
index 0000000..6225aa1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminnmq_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vminnmq_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vminnmt.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vminnmq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s16.c
new file mode 100644
index 0000000..0f29965
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vminq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmint.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vminq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s32.c
new file mode 100644
index 0000000..f2b20f1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vminq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmint.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vminq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s8.c
new file mode 100644
index 0000000..a4b172f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vminq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmint.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vminq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u16.c
new file mode 100644
index 0000000..e4643ad
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vminq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmint.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vminq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u32.c
new file mode 100644
index 0000000..a042749
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vminq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmint.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vminq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u8.c
new file mode 100644
index 0000000..cb578ab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vminq_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vminq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmint.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vminq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_s16.c
new file mode 100644
index 0000000..a55e093
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vmovlbq_x_s16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmovlbt.s16" } } */
+
+int32x4_t
+foo1 (int16x8_t a, mve_pred16_t p)
+{
+ return vmovlbq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_s8.c
new file mode 100644
index 0000000..8462ca4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int8x16_t a, mve_pred16_t p)
+{
+ return vmovlbq_x_s8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmovlbt.s8" } } */
+
+int16x8_t
+foo1 (int8x16_t a, mve_pred16_t p)
+{
+ return vmovlbq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_u16.c
new file mode 100644
index 0000000..d51eae8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint16x8_t a, mve_pred16_t p)
+{
+ return vmovlbq_x_u16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmovlbt.u16" } } */
+
+uint32x4_t
+foo1 (uint16x8_t a, mve_pred16_t p)
+{
+ return vmovlbq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_u8.c
new file mode 100644
index 0000000..f9040cd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovlbq_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint8x16_t a, mve_pred16_t p)
+{
+ return vmovlbq_x_u8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmovlbt.u8" } } */
+
+uint16x8_t
+foo1 (uint8x16_t a, mve_pred16_t p)
+{
+ return vmovlbq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_s16.c
new file mode 100644
index 0000000..6168454
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vmovltq_x_s16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmovltt.s16" } } */
+
+int32x4_t
+foo1 (int16x8_t a, mve_pred16_t p)
+{
+ return vmovltq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_s8.c
new file mode 100644
index 0000000..3a8c081
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int8x16_t a, mve_pred16_t p)
+{
+ return vmovltq_x_s8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmovltt.s8" } } */
+
+int16x8_t
+foo1 (int8x16_t a, mve_pred16_t p)
+{
+ return vmovltq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_u16.c
new file mode 100644
index 0000000..9858305
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint16x8_t a, mve_pred16_t p)
+{
+ return vmovltq_x_u16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmovltt.u16" } } */
+
+uint32x4_t
+foo1 (uint16x8_t a, mve_pred16_t p)
+{
+ return vmovltq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_u8.c
new file mode 100644
index 0000000..9072372
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmovltq_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint8x16_t a, mve_pred16_t p)
+{
+ return vmovltq_x_u8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmovltt.u8" } } */
+
+uint16x8_t
+foo1 (uint8x16_t a, mve_pred16_t p)
+{
+ return vmovltq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s16.c
new file mode 100644
index 0000000..d8d5b01
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vmulhq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmulht.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vmulhq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s32.c
new file mode 100644
index 0000000..ca46762
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vmulhq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmulht.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vmulhq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s8.c
new file mode 100644
index 0000000..9e154a5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vmulhq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmulht.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vmulhq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u16.c
new file mode 100644
index 0000000..9cee31a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vmulhq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmulht.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vmulhq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u32.c
new file mode 100644
index 0000000..0bc19e3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vmulhq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmulht.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vmulhq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u8.c
new file mode 100644
index 0000000..feb2fa4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulhq_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vmulhq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmulht.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vmulhq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s16.c
new file mode 100644
index 0000000..f24ec82
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vmullbq_int_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmullbt.s16" } } */
+
+int32x4_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vmullbq_int_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s32.c
new file mode 100644
index 0000000..ffc77b8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int64x2_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vmullbq_int_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmullbt.s32" } } */
+
+int64x2_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vmullbq_int_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s8.c
new file mode 100644
index 0000000..4952690
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vmullbq_int_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmullbt.s8" } } */
+
+int16x8_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vmullbq_int_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u16.c
new file mode 100644
index 0000000..58cd666
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vmullbq_int_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmullbt.u16" } } */
+
+uint32x4_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vmullbq_int_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u32.c
new file mode 100644
index 0000000..7029df8f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint64x2_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vmullbq_int_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmullbt.u32" } } */
+
+uint64x2_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vmullbq_int_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u8.c
new file mode 100644
index 0000000..7b7efc3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_int_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vmullbq_int_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmullbt.u8" } } */
+
+uint16x8_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vmullbq_int_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p16.c
new file mode 100644
index 0000000..b18fb29
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vmullbq_poly_x_p16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmullbt.p16" } } */
+
+uint32x4_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vmullbq_poly_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p8.c
new file mode 100644
index 0000000..4e5a5e3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmullbq_poly_x_p8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vmullbq_poly_x_p8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmullbt.p8" } } */
+
+uint16x8_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vmullbq_poly_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s16.c
new file mode 100644
index 0000000..0aa2ebd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vmulltq_int_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmulltt.s16" } } */
+
+int32x4_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vmulltq_int_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s32.c
new file mode 100644
index 0000000..491d050
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int64x2_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vmulltq_int_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmulltt.s32" } } */
+
+int64x2_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vmulltq_int_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s8.c
new file mode 100644
index 0000000..f0e1aaf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vmulltq_int_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmulltt.s8" } } */
+
+int16x8_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vmulltq_int_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u16.c
new file mode 100644
index 0000000..21d094d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vmulltq_int_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmulltt.u16" } } */
+
+uint32x4_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vmulltq_int_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u32.c
new file mode 100644
index 0000000..5ec93e3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint64x2_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vmulltq_int_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmulltt.u32" } } */
+
+uint64x2_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vmulltq_int_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u8.c
new file mode 100644
index 0000000..be9d922
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_int_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vmulltq_int_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmulltt.u8" } } */
+
+uint16x8_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vmulltq_int_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p16.c
new file mode 100644
index 0000000..5a5dec0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vmulltq_poly_x_p16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmulltt.p16" } } */
+
+uint32x4_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vmulltq_poly_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p8.c
new file mode 100644
index 0000000..ff8eccd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulltq_poly_x_p8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vmulltq_poly_x_p8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmulltt.p8" } } */
+
+uint16x8_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vmulltq_poly_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_f16.c
new file mode 100644
index 0000000..2a44a45
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_f16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vmulq_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_f32.c
new file mode 100644
index 0000000..f33f085
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_f32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vmulq_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_f16.c
new file mode 100644
index 0000000..2995890
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_f16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16_t b, mve_pred16_t p)
+{
+ return vmulq_x_n_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16_t b, mve_pred16_t p)
+{
+ return vmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_f32.c
new file mode 100644
index 0000000..53aa6b7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_f32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32_t b, mve_pred16_t p)
+{
+ return vmulq_x_n_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32_t b, mve_pred16_t p)
+{
+ return vmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s16.c
new file mode 100644
index 0000000..fc64cc1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16_t b, mve_pred16_t p)
+{
+ return vmulq_x_n_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16_t b, mve_pred16_t p)
+{
+ return vmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s32.c
new file mode 100644
index 0000000..28a5d0b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b, mve_pred16_t p)
+{
+ return vmulq_x_n_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32_t b, mve_pred16_t p)
+{
+ return vmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s8.c
new file mode 100644
index 0000000..b3d2419
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_s8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8_t b, mve_pred16_t p)
+{
+ return vmulq_x_n_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8_t b, mve_pred16_t p)
+{
+ return vmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u16.c
new file mode 100644
index 0000000..7864a13
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16_t b, mve_pred16_t p)
+{
+ return vmulq_x_n_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16_t b, mve_pred16_t p)
+{
+ return vmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u32.c
new file mode 100644
index 0000000..29bec64
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32_t b, mve_pred16_t p)
+{
+ return vmulq_x_n_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32_t b, mve_pred16_t p)
+{
+ return vmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u8.c
new file mode 100644
index 0000000..88afbeb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_n_u8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8_t b, mve_pred16_t p)
+{
+ return vmulq_x_n_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8_t b, mve_pred16_t p)
+{
+ return vmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s16.c
new file mode 100644
index 0000000..5ca463a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vmulq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s32.c
new file mode 100644
index 0000000..2d29ff0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vmulq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s8.c
new file mode 100644
index 0000000..3c37227
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_s8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vmulq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u16.c
new file mode 100644
index 0000000..34461fb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vmulq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u32.c
new file mode 100644
index 0000000..b8c46c3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vmulq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u8.c
new file mode 100644
index 0000000..d37f305
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmulq_x_u8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vmulq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vmulq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmult.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_s16.c
new file mode 100644
index 0000000..47e306d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_s16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (mve_pred16_t p)
+{
+ return vmvnq_x_n_s16 (2, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmvnt.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_s32.c
new file mode 100644
index 0000000..e93f4c7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_s32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (mve_pred16_t p)
+{
+ return vmvnq_x_n_s32 (2, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmvnt.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_u16.c
new file mode 100644
index 0000000..3839645
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_u16.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (mve_pred16_t p)
+{
+ return vmvnq_x_n_u16 (4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmvnt.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_u32.c
new file mode 100644
index 0000000..885f61c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_n_u32.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (mve_pred16_t p)
+{
+ return vmvnq_x_n_u32 (4, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmvnt.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s16.c
new file mode 100644
index 0000000..0ba4d74
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vmvnq_x_s16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmvnt" } } */
+
+int16x8_t
+foo1 (int16x8_t a, mve_pred16_t p)
+{
+ return vmvnq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmvnt" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s32.c
new file mode 100644
index 0000000..e040d3f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, mve_pred16_t p)
+{
+ return vmvnq_x_s32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmvnt" } } */
+
+int32x4_t
+foo1 (int32x4_t a, mve_pred16_t p)
+{
+ return vmvnq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmvnt" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s8.c
new file mode 100644
index 0000000..0205103
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_s8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, mve_pred16_t p)
+{
+ return vmvnq_x_s8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmvnt" } } */
+
+int8x16_t
+foo1 (int8x16_t a, mve_pred16_t p)
+{
+ return vmvnq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmvnt" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u16.c
new file mode 100644
index 0000000..1f55d61
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, mve_pred16_t p)
+{
+ return vmvnq_x_u16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmvnt" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, mve_pred16_t p)
+{
+ return vmvnq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmvnt" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u32.c
new file mode 100644
index 0000000..c5fb84d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, mve_pred16_t p)
+{
+ return vmvnq_x_u32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmvnt" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, mve_pred16_t p)
+{
+ return vmvnq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmvnt" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u8.c
new file mode 100644
index 0000000..0b46308
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vmvnq_x_u8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, mve_pred16_t p)
+{
+ return vmvnq_x_u8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmvnt" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, mve_pred16_t p)
+{
+ return vmvnq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vmvnt" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_f16.c
new file mode 100644
index 0000000..6719f8b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vnegq_x_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vnegt.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, mve_pred16_t p)
+{
+ return vnegq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_f32.c
new file mode 100644
index 0000000..0b5fc01
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vnegq_x_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vnegt.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, mve_pred16_t p)
+{
+ return vnegq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s16.c
new file mode 100644
index 0000000..c1e5130
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vnegq_x_s16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vnegt.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, mve_pred16_t p)
+{
+ return vnegq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s32.c
new file mode 100644
index 0000000..cdd0b8b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, mve_pred16_t p)
+{
+ return vnegq_x_s32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vnegt.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, mve_pred16_t p)
+{
+ return vnegq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s8.c
new file mode 100644
index 0000000..5308961
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vnegq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, mve_pred16_t p)
+{
+ return vnegq_x_s8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vnegt.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, mve_pred16_t p)
+{
+ return vnegq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_f16.c
new file mode 100644
index 0000000..061e7bf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vornq_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vornt" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vornq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_f32.c
new file mode 100644
index 0000000..e481083
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vornq_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vornt" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vornq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s16.c
new file mode 100644
index 0000000..176061e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vornq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vornt" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vornq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s32.c
new file mode 100644
index 0000000..7c1562e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vornq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vornt" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vornq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s8.c
new file mode 100644
index 0000000..017359b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vornq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vornt" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vornq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u16.c
new file mode 100644
index 0000000..f5ae555
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vornq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vornt" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vornq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u32.c
new file mode 100644
index 0000000..5e28447
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vornq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vornt" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vornq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u8.c
new file mode 100644
index 0000000..7e19d00
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vornq_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vornq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vornt" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vornq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_f16.c
new file mode 100644
index 0000000..df93d29
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vorrq_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vorrt" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vorrq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_f32.c
new file mode 100644
index 0000000..356d991
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vorrq_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vorrt" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vorrq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s16.c
new file mode 100644
index 0000000..01a2b58
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vorrq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vorrt" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vorrq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s32.c
new file mode 100644
index 0000000..b332474
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vorrq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vorrt" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vorrq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s8.c
new file mode 100644
index 0000000..a62b24e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vorrq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vorrt" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vorrq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u16.c
new file mode 100644
index 0000000..2054d47
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vorrq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vorrt" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vorrq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u32.c
new file mode 100644
index 0000000..37e828b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vorrq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vorrt" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vorrq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u8.c
new file mode 100644
index 0000000..2a6a273
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vorrq_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vorrq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vorrt" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vorrq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_x_s8.c
new file mode 100644
index 0000000..917012f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, mve_pred16_t p)
+{
+ return vrev16q_x_s8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrev16t.8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, mve_pred16_t p)
+{
+ return vrev16q_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_x_u8.c
new file mode 100644
index 0000000..e91ef20
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev16q_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, mve_pred16_t p)
+{
+ return vrev16q_x_u8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrev16t.8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, mve_pred16_t p)
+{
+ return vrev16q_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_f16.c
new file mode 100644
index 0000000..7f0eafc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vrev32q_x_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrev32t.16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, mve_pred16_t p)
+{
+ return vrev32q_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_s16.c
new file mode 100644
index 0000000..24613a5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vrev32q_x_s16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrev32t.16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, mve_pred16_t p)
+{
+ return vrev32q_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_s8.c
new file mode 100644
index 0000000..29eb483
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, mve_pred16_t p)
+{
+ return vrev32q_x_s8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrev32t.8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, mve_pred16_t p)
+{
+ return vrev32q_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_u16.c
new file mode 100644
index 0000000..0d4ad70
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, mve_pred16_t p)
+{
+ return vrev32q_x_u16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrev32t.16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, mve_pred16_t p)
+{
+ return vrev32q_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_u8.c
new file mode 100644
index 0000000..9dfad95
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev32q_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, mve_pred16_t p)
+{
+ return vrev32q_x_u8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrev32t.8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, mve_pred16_t p)
+{
+ return vrev32q_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_f16.c
new file mode 100644
index 0000000..b866979
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vrev64q_x_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrev64t.16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, mve_pred16_t p)
+{
+ return vrev64q_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_f32.c
new file mode 100644
index 0000000..986efeb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vrev64q_x_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrev64t.32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, mve_pred16_t p)
+{
+ return vrev64q_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s16.c
new file mode 100644
index 0000000..51165f1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vrev64q_x_s16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrev64t.16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, mve_pred16_t p)
+{
+ return vrev64q_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s32.c
new file mode 100644
index 0000000..8297d72
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, mve_pred16_t p)
+{
+ return vrev64q_x_s32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrev64t.32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, mve_pred16_t p)
+{
+ return vrev64q_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s8.c
new file mode 100644
index 0000000..b9763c5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, mve_pred16_t p)
+{
+ return vrev64q_x_s8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrev64t.8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, mve_pred16_t p)
+{
+ return vrev64q_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u16.c
new file mode 100644
index 0000000..f57373a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, mve_pred16_t p)
+{
+ return vrev64q_x_u16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrev64t.16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, mve_pred16_t p)
+{
+ return vrev64q_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u32.c
new file mode 100644
index 0000000..17cdd87
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, mve_pred16_t p)
+{
+ return vrev64q_x_u32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrev64t.32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, mve_pred16_t p)
+{
+ return vrev64q_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u8.c
new file mode 100644
index 0000000..4c9ce6a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrev64q_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, mve_pred16_t p)
+{
+ return vrev64q_x_u8 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrev64t.8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, mve_pred16_t p)
+{
+ return vrev64q_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s16.c
new file mode 100644
index 0000000..cc51dc2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vrhaddq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrhaddt.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vrhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s32.c
new file mode 100644
index 0000000..24264716
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vrhaddq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrhaddt.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vrhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s8.c
new file mode 100644
index 0000000..2fd551f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vrhaddq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrhaddt.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vrhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u16.c
new file mode 100644
index 0000000..0565390
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vrhaddq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrhaddt.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vrhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u32.c
new file mode 100644
index 0000000..81ad0b8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vrhaddq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrhaddt.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vrhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u8.c
new file mode 100644
index 0000000..cad187a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrhaddq_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vrhaddq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrhaddt.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vrhaddq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s16.c
new file mode 100644
index 0000000..2b57a54
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vrmulhq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrmulht.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vrmulhq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s32.c
new file mode 100644
index 0000000..ac8e286
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vrmulhq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrmulht.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vrmulhq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s8.c
new file mode 100644
index 0000000..01b704d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vrmulhq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrmulht.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vrmulhq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u16.c
new file mode 100644
index 0000000..a944fcd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vrmulhq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrmulht.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vrmulhq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u32.c
new file mode 100644
index 0000000..7720f7e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vrmulhq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrmulht.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vrmulhq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u8.c
new file mode 100644
index 0000000..0dfa015
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrmulhq_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vrmulhq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrmulht.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vrmulhq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_x_f16.c
new file mode 100644
index 0000000..1af1699
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vrndaq_x_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrintat.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, mve_pred16_t p)
+{
+ return vrndaq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_x_f32.c
new file mode 100644
index 0000000..c641902
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndaq_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vrndaq_x_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrintat.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, mve_pred16_t p)
+{
+ return vrndaq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_x_f16.c
new file mode 100644
index 0000000..8e77f94
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vrndmq_x_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrintmt.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, mve_pred16_t p)
+{
+ return vrndmq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_x_f32.c
new file mode 100644
index 0000000..a8e232f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndmq_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vrndmq_x_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrintmt.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, mve_pred16_t p)
+{
+ return vrndmq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_x_f16.c
new file mode 100644
index 0000000..3be1ac8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vrndnq_x_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrintnt.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, mve_pred16_t p)
+{
+ return vrndnq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_x_f32.c
new file mode 100644
index 0000000..d2c4f58
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndnq_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vrndnq_x_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrintnt.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, mve_pred16_t p)
+{
+ return vrndnq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_x_f16.c
new file mode 100644
index 0000000..41623ae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vrndpq_x_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrintpt.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, mve_pred16_t p)
+{
+ return vrndpq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_x_f32.c
new file mode 100644
index 0000000..7ce7cda
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndpq_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vrndpq_x_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrintpt.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, mve_pred16_t p)
+{
+ return vrndpq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_x_f16.c
new file mode 100644
index 0000000..dfb04bb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vrndq_x_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrintzt.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, mve_pred16_t p)
+{
+ return vrndq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_x_f32.c
new file mode 100644
index 0000000..2b695f6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndq_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vrndq_x_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrintzt.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, mve_pred16_t p)
+{
+ return vrndq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_x_f16.c
new file mode 100644
index 0000000..c3efca5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_x_f16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, mve_pred16_t p)
+{
+ return vrndxq_x_f16 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrintxt.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, mve_pred16_t p)
+{
+ return vrndxq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_x_f32.c
new file mode 100644
index 0000000..0c78ce5f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrndxq_x_f32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, mve_pred16_t p)
+{
+ return vrndxq_x_f32 (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrintxt.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, mve_pred16_t p)
+{
+ return vrndxq_x (a, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s16.c
new file mode 100644
index 0000000..a31adc9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vrshlq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshlt.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vrshlq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s32.c
new file mode 100644
index 0000000..98a90fb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vrshlq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshlt.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vrshlq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s8.c
new file mode 100644
index 0000000..8fcf7f7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_s8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vrshlq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshlt.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vrshlq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u16.c
new file mode 100644
index 0000000..d8a5b73
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u16.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vrshlq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshlt.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vrshlq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u32.c
new file mode 100644
index 0000000..39af58b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u32.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vrshlq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshlt.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vrshlq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u8.c
new file mode 100644
index 0000000..323d4ab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshlq_x_u8.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vrshlq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshlt.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vrshlq_x (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s16.c
new file mode 100644
index 0000000..c8dc5fc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vrshrq_x_n_s16 (a, 16, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshrt.s16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, mve_pred16_t p)
+{
+ return vrshrq_x (a, 16, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshrt.s16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s32.c
new file mode 100644
index 0000000..c8f4978
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, mve_pred16_t p)
+{
+ return vrshrq_x_n_s32 (a, 32, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshrt.s32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, mve_pred16_t p)
+{
+ return vrshrq_x (a, 32, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshrt.s32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s8.c
new file mode 100644
index 0000000..eb94ccb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_s8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, mve_pred16_t p)
+{
+ return vrshrq_x_n_s8 (a, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshrt.s8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, mve_pred16_t p)
+{
+ return vrshrq_x (a, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshrt.s8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u16.c
new file mode 100644
index 0000000..bb18df6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u16.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, mve_pred16_t p)
+{
+ return vrshrq_x_n_u16 (a, 16, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshrt.u16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, mve_pred16_t p)
+{
+ return vrshrq_x (a, 16, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshrt.u16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u32.c
new file mode 100644
index 0000000..19d1a8d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u32.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, mve_pred16_t p)
+{
+ return vrshrq_x_n_u32 (a, 32, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshrt.u32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, mve_pred16_t p)
+{
+ return vrshrq_x (a, 32, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshrt.u32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u8.c
new file mode 100644
index 0000000..25bf957
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vrshrq_x_n_u8.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, mve_pred16_t p)
+{
+ return vrshrq_x_n_u8 (a, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshrt.u8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, mve_pred16_t p)
+{
+ return vrshrq_x (a, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vrshrt.u8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_s16.c
new file mode 100644
index 0000000..60fa99e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_s16.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vshllbq_x_n_s16 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshllbt.s16" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_s8.c
new file mode 100644
index 0000000..ae817df
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_s8.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int8x16_t a, mve_pred16_t p)
+{
+ return vshllbq_x_n_s8 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshllbt.s8" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_u16.c
new file mode 100644
index 0000000..07c85d9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_u16.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint16x8_t a, mve_pred16_t p)
+{
+ return vshllbq_x_n_u16 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshllbt.u16" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_u8.c
new file mode 100644
index 0000000..0ac6383
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshllbq_x_n_u8.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint8x16_t a, mve_pred16_t p)
+{
+ return vshllbq_x_n_u8 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshllbt.u8" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_s16.c
new file mode 100644
index 0000000..e89fb56
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_s16.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vshlltq_x_n_s16 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlltt.s16" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_s8.c
new file mode 100644
index 0000000..5822014
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_s8.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int8x16_t a, mve_pred16_t p)
+{
+ return vshlltq_x_n_s8 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlltt.s8" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_u16.c
new file mode 100644
index 0000000..7b01bc9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_u16.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint16x8_t a, mve_pred16_t p)
+{
+ return vshlltq_x_n_u16 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlltt.u16" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_u8.c
new file mode 100644
index 0000000..1716771
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlltq_x_n_u8.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint8x16_t a, mve_pred16_t p)
+{
+ return vshlltq_x_n_u8 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlltt.u8" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s16.c
new file mode 100644
index 0000000..b14f188
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s16.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vshlq_x_n_s16 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.s16" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s32.c
new file mode 100644
index 0000000..aa50f08
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s32.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, mve_pred16_t p)
+{
+ return vshlq_x_n_s32 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.s32" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s8.c
new file mode 100644
index 0000000..4fb36fc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_s8.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, mve_pred16_t p)
+{
+ return vshlq_x_n_s8 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.s8" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u16.c
new file mode 100644
index 0000000..2222300
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u16.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, mve_pred16_t p)
+{
+ return vshlq_x_n_u16 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.u16" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u32.c
new file mode 100644
index 0000000..fc63fa8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u32.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, mve_pred16_t p)
+{
+ return vshlq_x_n_u32 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.u32" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u8.c
new file mode 100644
index 0000000..85af514
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_n_u8.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, mve_pred16_t p)
+{
+ return vshlq_x_n_u8 (a, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.u8" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s16.c
new file mode 100644
index 0000000..a4ccdec
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s16.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vshlq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.s16" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s32.c
new file mode 100644
index 0000000..b9db0ac
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s32.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vshlq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.s32" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s8.c
new file mode 100644
index 0000000..410adeb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_s8.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vshlq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.s8" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u16.c
new file mode 100644
index 0000000..c40e854
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u16.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vshlq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.u16" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u32.c
new file mode 100644
index 0000000..49533dd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u32.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vshlq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.u32" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u8.c
new file mode 100644
index 0000000..e5c2500
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlq_x_u8.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vshlq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlt.u8" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s16.c
new file mode 100644
index 0000000..864f8db
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s16.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, mve_pred16_t p)
+{
+ return vshrq_x_n_s16 (a, 16, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshrt.s16" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s32.c
new file mode 100644
index 0000000..62efb79
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s32.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, mve_pred16_t p)
+{
+ return vshrq_x_n_s32 (a, 32, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshrt.s32" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s8.c
new file mode 100644
index 0000000..666319f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_s8.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, mve_pred16_t p)
+{
+ return vshrq_x_n_s8 (a, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshrt.s8" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_u16.c
new file mode 100644
index 0000000..cbeaca5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_u16.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, mve_pred16_t p)
+{
+ return vshrq_x_n_u16 (a, 16, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshrt.u16" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_u8.c
new file mode 100644
index 0000000..09de106
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshrq_x_n_u8.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, mve_pred16_t p)
+{
+ return vshrq_x_n_u8 (a, 8, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshrt.u8" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_f16.c
new file mode 100644
index 0000000..885563e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_f16.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b, mve_pred16_t p)
+{
+ return vsubq_x_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.f16" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_f32.c
new file mode 100644
index 0000000..caaef42
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_f32.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b, mve_pred16_t p)
+{
+ return vsubq_x_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.f32" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_f16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_f16.c
new file mode 100644
index 0000000..8dc8661
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_f16.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16_t b, mve_pred16_t p)
+{
+ return vsubq_x_n_f16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.f16" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_f32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_f32.c
new file mode 100644
index 0000000..546662e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_f32.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32_t b, mve_pred16_t p)
+{
+ return vsubq_x_n_f32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.f32" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s16.c
new file mode 100644
index 0000000..e8715e85
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s16.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16_t b, mve_pred16_t p)
+{
+ return vsubq_x_n_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i16" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s32.c
new file mode 100644
index 0000000..5ae1b46
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s32.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32_t b, mve_pred16_t p)
+{
+ return vsubq_x_n_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i32" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s8.c
new file mode 100644
index 0000000..1c39f5e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_s8.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8_t b, mve_pred16_t p)
+{
+ return vsubq_x_n_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i8" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u16.c
new file mode 100644
index 0000000..9047fd6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u16.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16_t b, mve_pred16_t p)
+{
+ return vsubq_x_n_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i16" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u32.c
new file mode 100644
index 0000000..d74b0e6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u32.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32_t b, mve_pred16_t p)
+{
+ return vsubq_x_n_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i32" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u8.c
new file mode 100644
index 0000000..e14d55d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_n_u8.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8_t b, mve_pred16_t p)
+{
+ return vsubq_x_n_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i8" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s16.c
new file mode 100644
index 0000000..370ef71
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s16.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b, mve_pred16_t p)
+{
+ return vsubq_x_s16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i16" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s32.c
new file mode 100644
index 0000000..f4af97c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s32.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b, mve_pred16_t p)
+{
+ return vsubq_x_s32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i32" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s8.c
new file mode 100644
index 0000000..e1ac508
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_s8.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b, mve_pred16_t p)
+{
+ return vsubq_x_s8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i8" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u16.c
new file mode 100644
index 0000000..f9a951a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u16.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b, mve_pred16_t p)
+{
+ return vsubq_x_u16 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i16" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u32.c
new file mode 100644
index 0000000..593b8a3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u32.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b, mve_pred16_t p)
+{
+ return vsubq_x_u32 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i32" } } */
+
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u8.c
new file mode 100644
index 0000000..5e194b0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsubq_x_u8.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b, mve_pred16_t p)
+{
+ return vsubq_x_u8 (a, b, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vsubt.i8" } } */
+