Diffstat (limited to 'clang/test/CodeGen')
-rw-r--r--  clang/test/CodeGen/X86/avx2-builtins.c                            |   2
-rw-r--r--  clang/test/CodeGen/X86/avx512bw-builtins.c                        |   9
-rw-r--r--  clang/test/CodeGen/X86/avx512cd-builtins.c                        |  32
-rw-r--r--  clang/test/CodeGen/X86/avx512dq-builtins.c                        |  13
-rw-r--r--  clang/test/CodeGen/X86/avx512f-builtins.c                         |  14
-rw-r--r--  clang/test/CodeGen/X86/avx512vl-builtins.c                        |  44
-rw-r--r--  clang/test/CodeGen/X86/avx512vlbw-builtins.c                      |  13
-rw-r--r--  clang/test/CodeGen/X86/avx512vlcd-builtins.c                      |  59
-rw-r--r--  clang/test/CodeGen/X86/avx512vldq-builtins.c                      |  18
-rw-r--r--  clang/test/CodeGen/X86/mmx-builtins.c                             |   2
-rw-r--r--  clang/test/CodeGen/X86/ssse3-builtins.c                           |   2
-rw-r--r--  clang/test/CodeGen/arm-mve-intrinsics/load-store.c                | 420
-rw-r--r--  clang/test/CodeGen/attr-target-mv.c                               |  10
-rw-r--r--  clang/test/CodeGen/distributed-thin-lto/supports-hot-cold-new.ll  |   9
-rw-r--r--  clang/test/CodeGen/target-builtin-noerror.c                       |   2
-rw-r--r--  clang/test/CodeGen/unified-lto-module-flag.ll                     |  11
16 files changed, 430 insertions, 230 deletions
diff --git a/clang/test/CodeGen/X86/avx2-builtins.c b/clang/test/CodeGen/X86/avx2-builtins.c
index dc64f96..b798618 100644
--- a/clang/test/CodeGen/X86/avx2-builtins.c
+++ b/clang/test/CodeGen/X86/avx2-builtins.c
@@ -1130,6 +1130,8 @@ __m256i test_mm256_shuffle_epi8(__m256i a, __m256i b) {
   return _mm256_shuffle_epi8(a, b);
 }
 
+TEST_CONSTEXPR(match_v32qi(_mm256_shuffle_epi8((__m256i)(__v32qi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31}, (__m256i)(__v32qs){0,33,2,35,4,37,6,-39,8,41,10,43,12,45,14,-47,16,49,18,51,20,53,22,-55,24,57,26,59,28,61,30,-63}), 0,1,2,3,4,5,6,0,8,9,10,11,12,13,14,0,16,17,18,19,20,21,22,0,24,25,26,27,28,29,30,0));
+
 __m256i test_mm256_shuffle_epi32(__m256i a) {
   // CHECK-LABEL: test_mm256_shuffle_epi32
   // CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> poison, <8 x i32> <i32 3, i32 3, i32 0, i32 0, i32 7, i32 7, i32 4, i32 4>
diff --git a/clang/test/CodeGen/X86/avx512bw-builtins.c b/clang/test/CodeGen/X86/avx512bw-builtins.c
index af1c904..fddf17d 100644
--- a/clang/test/CodeGen/X86/avx512bw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512bw-builtins.c
@@ -1466,18 +1466,27 @@ __m512i test_mm512_shuffle_epi8(__m512i __A, __m512i __B) {
   // CHECK: @llvm.x86.avx512.pshuf.b.512
   return _mm512_shuffle_epi8(__A,__B);
 }
+
+TEST_CONSTEXPR(match_v64qi(_mm512_shuffle_epi8((__m512i)(__v64qi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63}, (__m512i)(__v64qs){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,-15,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,-15,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,-79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,-95}), 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,0,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,0,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,0,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,0));
+
 __m512i test_mm512_mask_shuffle_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_mask_shuffle_epi8
   // CHECK: @llvm.x86.avx512.pshuf.b.512
   // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
   return _mm512_mask_shuffle_epi8(__W,__U,__A,__B);
 }
+
+TEST_CONSTEXPR(match_v64qi(_mm512_mask_shuffle_epi8((__m512i)(__v64qi){1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8}, 0xFFFFFFFF00000000, (__m512i)(__v64qi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63}, (__m512i)(__v64qi){63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0}), 1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48));
+
 __m512i test_mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_maskz_shuffle_epi8
   // CHECK: @llvm.x86.avx512.pshuf.b.512
   // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
   return _mm512_maskz_shuffle_epi8(__U,__A,__B);
 }
+
+TEST_CONSTEXPR(match_v64qi(_mm512_maskz_shuffle_epi8(0x8888888888888888,(__m512i)(__v64qi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63}, (__m512i)(__v64qi){127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64}), 0,0,0,12,0,0,0,8,0,0,0,4,0,0,0,0,0,0,0,28,0,0,0,24,0,0,0,20,0,0,0,16,0,0,0,44,0,0,0,40,0,0,0,36,0,0,0,32,0,0,0,60,0,0,0,56,0,0,0,52,0,0,0,48));
+
 __m512i test_mm512_subs_epi8(__m512i __A, __m512i __B) {
   // CHECK-LABEL: test_mm512_subs_epi8
   // CHECK: @llvm.ssub.sat.v64i8
diff --git a/clang/test/CodeGen/X86/avx512cd-builtins.c b/clang/test/CodeGen/X86/avx512cd-builtins.c
index b9d42b7..80a20b1 100644
--- a/clang/test/CodeGen/X86/avx512cd-builtins.c
+++ b/clang/test/CodeGen/X86/avx512cd-builtins.c
@@ -14,37 +14,53 @@ __m512i test_mm512_conflict_epi64(__m512i __A) {
   // CHECK-LABEL: test_mm512_conflict_epi64
   // CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.conflict.q.512(<8 x i64> %{{.*}})
-  return _mm512_conflict_epi64(__A);
+  return _mm512_conflict_epi64(__A);
 }
+
+TEST_CONSTEXPR(match_v8di(_mm512_conflict_epi64((__m512i)(__v8di){1, 2, 1, 3, 2, 4, 1, 5}), 0, 0, 1, 0, 2, 0, 5, 0));
+TEST_CONSTEXPR(match_v8di(_mm512_conflict_epi64((__m512i)(__v8di){5, 5, 5, 5, 5, 5, 5, 5}), 0, 1, 3, 7, 15, 31, 63, 127));
+TEST_CONSTEXPR(match_v8di(_mm512_conflict_epi64((__m512i)(__v8di){1, 2, 3, 4, 5, 6, 7, 8}), 0, 0, 0, 0, 0, 0, 0, 0));
 __m512i test_mm512_mask_conflict_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
   // CHECK-LABEL: test_mm512_mask_conflict_epi64
   // CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.conflict.q.512(<8 x i64> %{{.*}})
   // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-  return _mm512_mask_conflict_epi64(__W,__U,__A);
+  return _mm512_mask_conflict_epi64(__W,__U,__A);
 }
+
+TEST_CONSTEXPR(match_v8di(_mm512_mask_conflict_epi64((__m512i)(__v8di){0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 0x55, (__m512i)(__v8di){1, 2, 1, 3, 2, 4, 1, 5}), 0, 0xFF, 1, 0xFF, 2, 0xFF, 5, 0xFF));
 __m512i test_mm512_maskz_conflict_epi64(__mmask8 __U, __m512i __A) {
   // CHECK-LABEL: test_mm512_maskz_conflict_epi64
   // CHECK: call {{.*}}<8 x i64> @llvm.x86.avx512.conflict.q.512(<8 x i64> %{{.*}})
   // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
-  return _mm512_maskz_conflict_epi64(__U,__A);
+  return _mm512_maskz_conflict_epi64(__U,__A);
 }
+
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_conflict_epi64(0x55, (__m512i)(__v8di){1, 2, 1, 3, 2, 4, 1, 5}), 0, 0, 1, 0, 2, 0, 5, 0));
 __m512i test_mm512_conflict_epi32(__m512i __A) {
   // CHECK-LABEL: test_mm512_conflict_epi32
   // CHECK: call <16 x i32> @llvm.x86.avx512.conflict.d.512(<16 x i32> %{{.*}})
-  return _mm512_conflict_epi32(__A);
+  return _mm512_conflict_epi32(__A);
 }
+
+TEST_CONSTEXPR(match_v16si(_mm512_conflict_epi32((__m512i)(__v16si){1, 2, 1, 3, 2, 4, 1, 5, 6, 7, 6, 8, 7, 9, 6, 10}), 0, 0, 1, 0, 2, 0, 5, 0, 0, 0, 256, 0, 512, 0, 1280, 0));
+TEST_CONSTEXPR(match_v16si(_mm512_conflict_epi32((__m512i)(__v16si){9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}), 0, 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023, 2047, 4095, 8191, 16383, 32767));
+TEST_CONSTEXPR(match_v16si(_mm512_conflict_epi32((__m512i)(__v16si){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
 __m512i test_mm512_mask_conflict_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
   // CHECK-LABEL: test_mm512_mask_conflict_epi32
   // CHECK: call <16 x i32> @llvm.x86.avx512.conflict.d.512(<16 x i32> %{{.*}})
   // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-  return _mm512_mask_conflict_epi32(__W,__U,__A);
+  return _mm512_mask_conflict_epi32(__W,__U,__A);
 }
+
+TEST_CONSTEXPR(match_v16si(_mm512_mask_conflict_epi32((__m512i)(__v16si){0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 0x5555, (__m512i)(__v16si){1, 2, 1, 3, 2, 4, 1, 5, 6, 7, 6, 8, 7, 9, 6, 10}), 0, 0xFF, 1, 0xFF, 2, 0xFF, 5, 0xFF, 0, 0xFF, 256, 0xFF, 512, 0xFF, 1280, 0xFF));
 __m512i test_mm512_maskz_conflict_epi32(__mmask16 __U, __m512i __A) {
   // CHECK-LABEL: test_mm512_maskz_conflict_epi32
   // CHECK: call <16 x i32> @llvm.x86.avx512.conflict.d.512(<16 x i32> %{{.*}})
   // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
-  return _mm512_maskz_conflict_epi32(__U,__A);
+  return _mm512_maskz_conflict_epi32(__U,__A);
 }
+
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_conflict_epi32(0x5555, (__m512i)(__v16si){1, 2, 1, 3, 2, 4, 1, 5, 6, 7, 6, 8, 7, 9, 6, 10}), 0, 0, 1, 0, 2, 0, 5, 0, 0, 0, 256, 0, 512, 0, 1280, 0));
 
 __m512i test_mm512_lzcnt_epi32(__m512i __A) {
   // CHECK-LABEL: test_mm512_lzcnt_epi32
   // CHECK: call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %{{.*}}, i1 true)
@@ -125,6 +141,8 @@ __m512i test_mm512_broadcastmb_epi64(__m512i a, __m512i b) {
   // CHECK: insertelement <8 x i64> %{{.*}}, i64 %{{.*}}, i32 7
   return _mm512_broadcastmb_epi64(_mm512_cmpeq_epu64_mask ( a, b));
 }
+TEST_CONSTEXPR(match_v8di(_mm512_broadcastmb_epi64((__mmask8)(0)), 0,0,0,0, 0,0,0,0));
+TEST_CONSTEXPR(match_v8di(_mm512_broadcastmb_epi64((__mmask8)(0xab)), 0xab,0xab,0xab,0xab, 0xab,0xab,0xab,0xab));
 
 __m512i test_mm512_broadcastmw_epi32(__m512i a, __m512i b) {
   // CHECK-LABEL: test_mm512_broadcastmw_epi32
@@ -148,3 +166,5 @@ __m512i test_mm512_broadcastmw_epi32(__m512i a, __m512i b) {
   // CHECK: insertelement <16 x i32> %{{.*}}, i32 %{{.*}}
   return _mm512_broadcastmw_epi32(_mm512_cmpeq_epi32_mask ( a, b));
 }
+TEST_CONSTEXPR(match_v16si(_mm512_broadcastmw_epi32((__mmask16)(0xff)), 0xff,0xff,0xff,0xff, 0xff,0xff,0xff,0xff, 0xff,0xff,0xff,0xff, 0xff,0xff,0xff,0xff));
+TEST_CONSTEXPR(match_v16si(_mm512_broadcastmw_epi32((__mmask16)(0x0FA1L)), 0x0FA1L,0x0FA1L,0x0FA1L,0x0FA1L, 0x0FA1L,0x0FA1L,0x0FA1L,0x0FA1L, 0x0FA1L,0x0FA1L,0x0FA1L,0x0FA1L, 0x0FA1L,0x0FA1L,0x0FA1L,0x0FA1L));
diff --git a/clang/test/CodeGen/X86/avx512dq-builtins.c b/clang/test/CodeGen/X86/avx512dq-builtins.c
index 4112561..1b09959 100644
--- a/clang/test/CodeGen/X86/avx512dq-builtins.c
+++ b/clang/test/CodeGen/X86/avx512dq-builtins.c
@@ -1305,6 +1305,7 @@ __m512 test_mm512_mask_broadcast_f32x8(__m512 __O, __mmask16 __M, float const* _
   // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
   return _mm512_mask_broadcast_f32x8(__O, __M, _mm256_loadu_ps(__A));
 }
+TEST_CONSTEXPR(match_m512(_mm512_mask_broadcast_f32x8(_mm512_setzero_ps(), 0xAAAA, (__m256)(__v8sf){5.0f,5.0f,5.0f,5.0f,5.0f,5.0f,5.0f,5.0f}), 0,5,0,5,0,5,0,5,0,5,0,5,0,5,0,5));
 
 __m512 test_mm512_maskz_broadcast_f32x8(__mmask16 __M, float const* __A) {
   // CHECK-LABEL: test_mm512_maskz_broadcast_f32x8
@@ -1312,6 +1313,7 @@ __m512 test_mm512_maskz_broadcast_f32x8(__mmask16 __M, float const* __A) {
   // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
   return _mm512_maskz_broadcast_f32x8(__M, _mm256_loadu_ps(__A));
 }
+TEST_CONSTEXPR(match_m512(_mm512_maskz_broadcast_f32x8(0xAAAA, _mm256_set1_ps(7.0f)), 0,7,0,7,0,7,0,7,0,7,0,7,0,7,0,7));
 
 __m512d test_mm512_broadcast_f64x2(double const* __A) {
   // CHECK-LABEL: test_mm512_broadcast_f64x2
@@ -1327,6 +1329,8 @@ __m512d test_mm512_mask_broadcast_f64x2(__m512d __O, __mmask8 __M, double const*
   return _mm512_mask_broadcast_f64x2(__O, __M, _mm_loadu_pd(__A));
 }
 
+TEST_CONSTEXPR(match_m512d(_mm512_mask_broadcast_f64x2(_mm512_setzero_pd(), 0xAA, (__m128d)(__v2df){1,2}), 0,2,0,2,0,2,0,2));
+
 __m512d test_mm512_maskz_broadcast_f64x2(__mmask8 __M, double const* __A) {
   // CHECK-LABEL: test_mm512_maskz_broadcast_f64x2
   // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -1334,6 +1338,8 @@ __m512d test_mm512_maskz_broadcast_f64x2(__mmask8 __M, double const* __A) {
   return _mm512_maskz_broadcast_f64x2(__M, _mm_loadu_pd(__A));
 }
 
+TEST_CONSTEXPR(match_m512d(_mm512_maskz_broadcast_f64x2(0xAA, (__m128d)(__v2df){1,2}), 0,2,0,2,0,2,0,2));
+
 __m512i test_mm512_broadcast_i32x2(__m128i __A) {
   // CHECK-LABEL: test_mm512_broadcast_i32x2
   // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -1348,6 +1354,8 @@ __m512i test_mm512_mask_broadcast_i32x2(__m512i __O, __mmask16 __M, __m128i __A)
   return _mm512_mask_broadcast_i32x2(__O, __M, __A);
 }
 
+TEST_CONSTEXPR(match_v16si(_mm512_mask_broadcast_i32x2(_mm512_setzero_si512(), 0xAAAA, (__m128i)(__v4si){0,1,2,3}), 0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1));
+
 __m512i test_mm512_maskz_broadcast_i32x2(__mmask16 __M, __m128i __A) {
   // CHECK-LABEL: test_mm512_maskz_broadcast_i32x2
   // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -1355,6 +1363,8 @@ __m512i test_mm512_maskz_broadcast_i32x2(__mmask16 __M, __m128i __A) {
   return _mm512_maskz_broadcast_i32x2(__M, __A);
 }
 
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_broadcast_i32x2(0xAAAA, (__m128i)(__v4si){0,1,2,3}), 0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1));
+
 __m512i test_mm512_broadcast_i32x8(__m256i const* __A) {
   // CHECK-LABEL: test_mm512_broadcast_i32x8
   // CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1368,6 +1378,7 @@ __m512i test_mm512_mask_broadcast_i32x8(__m512i __O, __mmask16 __M, __m256i cons
   // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
   return _mm512_mask_broadcast_i32x8(__O, __M, _mm256_loadu_si256(__A));
 }
+TEST_CONSTEXPR(match_v16si(_mm512_mask_broadcast_i32x8(_mm512_setzero_si512(), 0xAAAA, _mm256_set1_epi32(8)), 0,8,0,8,0,8,0,8,0,8,0,8,0,8,0,8));
 
 __m512i test_mm512_maskz_broadcast_i32x8(__mmask16 __M, __m256i const* __A) {
   // CHECK-LABEL: test_mm512_maskz_broadcast_i32x8
@@ -1376,6 +1387,8 @@ __m512i test_mm512_maskz_broadcast_i32x8(__mmask16 __M, __m256i const* __A) {
   return _mm512_maskz_broadcast_i32x8(__M, _mm256_loadu_si256(__A));
 }
 
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_broadcast_i32x8(0xAAAA, _mm256_set1_epi32(9)), 0,9,0,9,0,9,0,9,0,9,0,9,0,9,0,9));
+
 __m512i test_mm512_broadcast_i64x2(__m128i const* __A) {
   // CHECK-LABEL: test_mm512_broadcast_i64x2
   // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
diff --git a/clang/test/CodeGen/X86/avx512f-builtins.c b/clang/test/CodeGen/X86/avx512f-builtins.c
index 7756f0d..3deaf8e 100644
--- a/clang/test/CodeGen/X86/avx512f-builtins.c
+++ b/clang/test/CodeGen/X86/avx512f-builtins.c
@@ -6836,6 +6836,8 @@ __m512 test_mm512_mask_broadcast_f32x4(__m512 __O, __mmask16 __M, float const* _
   return _mm512_mask_broadcast_f32x4(__O, __M, _mm_loadu_ps(__A));
 }
 
+TEST_CONSTEXPR(match_m512(_mm512_mask_broadcast_f32x4(_mm512_setzero_ps(), 0xAAAA, (__m128)(__v4sf){1,2,3,4}), 0,2,0,4,0,2,0,4,0,2,0,4,0,2,0,4));
+
 __m512 test_mm512_maskz_broadcast_f32x4(__mmask16 __M, float const* __A) {
   // CHECK-LABEL: test_mm512_maskz_broadcast_f32x4
   // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -6843,6 +6845,8 @@ __m512 test_mm512_maskz_broadcast_f32x4(__mmask16 __M, float const* __A) {
   return _mm512_maskz_broadcast_f32x4(__M, _mm_loadu_ps(__A));
 }
 
+TEST_CONSTEXPR(match_m512(_mm512_maskz_broadcast_f32x4(0xAAAA, (__m128)(__v4sf){1,2,3,4}), 0,2,0,4,0,2,0,4,0,2,0,4,0,2,0,4));
+
 __m512d test_mm512_broadcast_f64x4(double const* __A) {
   // CHECK-LABEL: test_mm512_broadcast_f64x4
   // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -6885,6 +6889,8 @@ __m512i test_mm512_maskz_broadcast_i32x4(__mmask16 __M, __m128i const* __A) {
   return _mm512_maskz_broadcast_i32x4(__M, _mm_loadu_si128(__A));
 }
 
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_broadcast_i32x4(0xAAAA, (__m128i)(__v4si){0,1,2,3}), 0,1,0,3,0,1,0,3,0,1,0,3,0,1,0,3));
+
 __m512i test_mm512_broadcast_i64x4(__m256i const* __A) {
   // CHECK-LABEL: test_mm512_broadcast_i64x4
   // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -10903,6 +10909,8 @@ __m512i test_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A)
   return _mm512_mask_set1_epi32 ( __O, __M, __A);
 }
 
+TEST_CONSTEXPR(match_v16si(_mm512_mask_set1_epi32(_mm512_setzero_si512(), 0xAAAA, 13), 0,13,0,13,0,13,0,13,0,13,0,13,0,13,0,13));
+
 __m512i test_mm512_maskz_set1_epi32(__mmask16 __M, int __A)
 {
   // CHECK-LABEL: test_mm512_maskz_set1_epi32
@@ -10926,6 +10934,8 @@ __m512i test_mm512_maskz_set1_epi32(__mmask16 __M, int __A)
   return _mm512_maskz_set1_epi32(__M, __A);
 }
 
+TEST_CONSTEXPR(match_v16si(_mm512_maskz_set1_epi32(0xAAAA, 19), 0,19,0,19,0,19,0,19,0,19,0,19,0,19,0,19));
+
 __m512i test_mm512_set_epi8(char e63, char e62, char e61, char e60,
                             char e59, char e58, char e57, char e56,
                             char e55, char e54, char e53, char e52,
@@ -11145,6 +11155,8 @@ __m512i test_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A)
   return _mm512_mask_set1_epi64 (__O, __M, __A);
 }
 
+TEST_CONSTEXPR(match_v8di(_mm512_mask_set1_epi64(_mm512_setzero_si512(), 0xAA, 21), 0,21,0,21,0,21,0,21));
+
 __m512i test_mm512_maskz_set1_epi64 (__mmask8 __M, long long __A)
 {
   // CHECK-LABEL: test_mm512_maskz_set1_epi64
@@ -11160,6 +11172,8 @@ __m512i test_mm512_maskz_set1_epi64 (__mmask8 __M, long long __A)
   return _mm512_maskz_set1_epi64 (__M, __A);
 }
 
+TEST_CONSTEXPR(match_v8di(_mm512_maskz_set1_epi64(0xAA, 23), 0, 23, 0, 23, 0, 23, 0, 23));
+
 __m512i test_mm512_set_epi64 (long long __A, long long __B,
                               long long __C, long long __D,
                               long long __E, long long __F,
diff --git a/clang/test/CodeGen/X86/avx512vl-builtins.c b/clang/test/CodeGen/X86/avx512vl-builtins.c
index 51385d5..9b6bfea9 100644
--- a/clang/test/CodeGen/X86/avx512vl-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vl-builtins.c
@@ -7201,6 +7201,8 @@ __m128i test_mm_mask_set1_epi32(__m128i __O, __mmask8 __M) {
   return _mm_mask_set1_epi32(__O, __M, 5);
 }
 
+TEST_CONSTEXPR(match_v4si(_mm_mask_set1_epi32(_mm_setzero_si128(), 0xF, 7), 7, 7, 7, 7));
+
 __m128i test_mm_maskz_set1_epi32(__mmask8 __M) {
   // CHECK-LABEL: test_mm_maskz_set1_epi32
   // CHECK: insertelement <4 x i32> poison, i32 %{{.*}}, i32 0
@@ -7212,6 +7214,8 @@ __m128i test_mm_maskz_set1_epi32(__mmask8 __M) {
   return _mm_maskz_set1_epi32(__M, 5);
 }
 
+TEST_CONSTEXPR(match_v4si(_mm_maskz_set1_epi32(0xA, 11), 0, 11, 0, 11));
+
 __m256i test_mm256_mask_set1_epi32(__m256i __O, __mmask8 __M) {
   // CHECK-LABEL: test_mm256_mask_set1_epi32
   // CHECK: insertelement <8 x i32> poison, i32 %{{.*}}, i32 0
@@ -7226,6 +7230,8 @@ __m256i test_mm256_mask_set1_epi32(__m256i __O, __mmask8 __M) {
   return _mm256_mask_set1_epi32(__O, __M, 5);
 }
 
+TEST_CONSTEXPR(match_v8si(_mm256_mask_set1_epi32(_mm256_setzero_si256(), 0xAA, 5), 0, 5, 0, 5, 0, 5, 0, 5));
+
 __m256i test_mm256_maskz_set1_epi32(__mmask8 __M) {
   // CHECK-LABEL: test_mm256_maskz_set1_epi32
   // CHECK: insertelement <8 x i32> poison, i32 %{{.*}}, i32 0
@@ -7240,6 +7246,8 @@ __m256i test_mm256_maskz_set1_epi32(__mmask8 __M) {
   return _mm256_maskz_set1_epi32(__M, 5);
 }
 
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_set1_epi32(0xAA, 13), 0, 13, 0, 13, 0, 13, 0, 13));
+
 __m128i test_mm_mask_set1_epi64(__m128i __O, __mmask8 __M, long long __A) {
   // CHECK-LABEL: test_mm_mask_set1_epi64
   // CHECK: insertelement <2 x i64> poison, i64 %{{.*}}, i32 0
@@ -7249,6 +7257,8 @@ __m128i test_mm_mask_set1_epi64(__m128i __O, __mmask8 __M, long long __A) {
   return _mm_mask_set1_epi64(__O, __M, __A);
 }
 
+TEST_CONSTEXPR(match_v2di(_mm_mask_set1_epi64(_mm_setzero_si128(), 0x3, 9), 9, 9));
+
 __m128i test_mm_maskz_set1_epi64(__mmask8 __M, long long __A) {
   // CHECK-LABEL: test_mm_maskz_set1_epi64
   // CHECK: insertelement <2 x i64> poison, i64 %{{.*}}, i32 0
@@ -7258,6 +7268,8 @@ __m128i test_mm_maskz_set1_epi64(__mmask8 __M, long long __A) {
   return _mm_maskz_set1_epi64(__M, __A);
 }
 
+TEST_CONSTEXPR(match_v2di(_mm_maskz_set1_epi64(0x2, 15), 0, 15));
+
 __m256i test_mm256_mask_set1_epi64(__m256i __O, __mmask8 __M, long long __A) {
   // CHECK-LABEL: test_mm256_mask_set1_epi64
   // CHECK: insertelement <4 x i64> poison, i64 %{{.*}}, i32 0
@@ -7269,6 +7281,8 @@ __m256i test_mm256_mask_set1_epi64(__m256i __O, __mmask8 __M, long long __A) {
   return _mm256_mask_set1_epi64(__O, __M, __A);
 }
 
+TEST_CONSTEXPR(match_v4di(_mm256_mask_set1_epi64(_mm256_setzero_si256(), 0xF, 11), 11, 11, 11, 11));
+
 __m256i test_mm256_maskz_set1_epi64(__mmask8 __M, long long __A) {
   // CHECK-LABEL: test_mm256_maskz_set1_epi64
   // CHECK: insertelement <4 x i64> poison, i64 %{{.*}}, i32 0
@@ -7280,6 +7294,8 @@ __m256i test_mm256_maskz_set1_epi64(__mmask8 __M, long long __A) {
   return _mm256_maskz_set1_epi64(__M, __A);
 }
 
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_set1_epi64(0xA, 17), 0, 17, 0, 17));
+
 __m128d test_mm_fixupimm_pd(__m128d __A, __m128d __B, __m128i __C) {
   // CHECK-LABEL: test_mm_fixupimm_pd
   // CHECK: @llvm.x86.avx512.mask.fixupimm.pd.128
@@ -7623,6 +7639,8 @@ __m128d test_mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d
   return _mm_mask_unpackhi_pd(__W, __U, __A, __B);
 }
 
+TEST_CONSTEXPR(match_m128d(_mm_mask_unpackhi_pd(_mm_setzero_pd(), 0x3, (__m128d)(__v2df){1.0,2.0}, (__m128d)(__v2df){3.0,4.0}), 2.0,4.0));
+
 __m128d test_mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B) {
   // CHECK-LABEL: test_mm_maskz_unpackhi_pd
   // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
@@ -7637,6 +7655,8 @@ __m256d test_mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m2
   return _mm256_mask_unpackhi_pd(__W, __U, __A, __B);
 }
 
+TEST_CONSTEXPR(match_m256d(_mm256_mask_unpackhi_pd(_mm256_setzero_pd(), 0xAA, (__m256d)(__v4df){1.0,2.0,3.0,4.0}, (__m256d)(__v4df){5.0,6.0,7.0,8.0}), 0,6.0,0,8.0));
+
 __m256d test_mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B) {
   // CHECK-LABEL: test_mm256_maskz_unpackhi_pd
   // CHECK: shufflevector <8 x i1> %{{.*}}, <8 x i1> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -7679,6 +7699,8 @@ __m128d test_mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d
   return _mm_mask_unpacklo_pd(__W, __U, __A, __B);
 }
 
+TEST_CONSTEXPR(match_m128d(_mm_mask_unpacklo_pd(_mm_setzero_pd(), 0x3, (__m128d)(__v2df){1.0,2.0}, (__m128d)(__v2df){3.0,4.0}), 1.0,3.0));
+
 __m128d test_mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B) {
   // CHECK-LABEL: test_mm_maskz_unpacklo_pd
   // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 0, i32 2>
@@ -7686,6 +7708,8 @@ __m128d test_mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B) {
   return _mm_maskz_unpacklo_pd(__U, __A, __B);
 }
 
+TEST_CONSTEXPR(match_m128d(_mm_maskz_unpacklo_pd(0x2, (__m128d)(__v2df){1.0,2.0}, (__m128d)(__v2df){3.0,4.0}), 0.0,3.0));
+
 __m256d test_mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
   // CHECK-LABEL: test_mm256_mask_unpacklo_pd
   // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -7693,6 +7717,8 @@ __m256d test_mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m2
   return _mm256_mask_unpacklo_pd(__W, __U, __A, __B);
 }
 
+TEST_CONSTEXPR(match_m256d(_mm256_mask_unpacklo_pd(_mm256_setzero_pd(), 0xAA, (__m256d)(__v4df){1.0,2.0,3.0,4.0}, (__m256d)(__v4df){5.0,6.0,7.0,8.0}), 0,5.0,0,7.0));
+
 __m256d test_mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B) {
   // CHECK-LABEL: test_mm256_maskz_unpacklo_pd
   // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -7700,6 +7726,8 @@ __m256d test_mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B) {
   return _mm256_maskz_unpacklo_pd(__U, __A, __B);
 }
 
+TEST_CONSTEXPR(match_m256d(_mm256_maskz_unpacklo_pd(0x0A, (__m256d)(__v4df){1.0,2.0,3.0,4.0}, (__m256d)(__v4df){5.0,6.0,7.0,8.0}), 0.0,5.0,0.0,7.0));
+
 __m128 test_mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
   // CHECK-LABEL: test_mm_mask_unpacklo_ps
   // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -8039,6 +8067,8 @@ __m128i test_mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m1
   return _mm_mask_unpackhi_epi32(__W, __U, __A, __B);
 }
 
+TEST_CONSTEXPR(match_v4si(_mm_mask_unpackhi_epi32(_mm_setzero_si128(), 0xA, (__m128i)(__v4si){0,1,2,3}, (__m128i)(__v4si){4,5,6,7}), 0,6,0,7));
+
 __m128i test_mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_unpackhi_epi32
   // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -8046,6 +8076,8 @@ __m128i test_mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B) {
   return _mm_maskz_unpackhi_epi32(__U, __A, __B);
 }
 
+TEST_CONSTEXPR(match_v4si(_mm_maskz_unpackhi_epi32(0x5, (__m128i)(__v4si){0,1,2,3}, (__m128i)(__v4si){4,5,6,7}), 2,0,3,0));
+
 __m256i test_mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_unpackhi_epi32
   // CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
@@ -8997,6 +9029,8 @@ __m256 test_mm256_maskz_broadcast_f32x4(__mmask8 __M, __m128 __A) {
   return _mm256_maskz_broadcast_f32x4(__M, __A);
 }
 
+TEST_CONSTEXPR(match_m256(_mm256_maskz_broadcast_f32x4(0xAA, (__m128)(__v4sf){0,1,2,3}), 0,1,0,3,0,1,0,3));
+
 __m256i test_mm256_broadcast_i32x4(__m128i const* __A) {
   // CHECK-LABEL: test_mm256_broadcast_i32x4
   // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -9018,6 +9052,8 @@ __m256i test_mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i const* __A) {
   return _mm256_maskz_broadcast_i32x4(__M, _mm_loadu_si128(__A));
 }
 
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_broadcast_i32x4(0xAA, (__m128i)(__v4si){0,1,2,3}), 0,1,0,3,0,1,0,3));
+
 __m256d test_mm256_mask_broadcastsd_pd(__m256d __O, __mmask8 __M, __m128d __A) {
   // CHECK-LABEL: test_mm256_mask_broadcastsd_pd
   // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> zeroinitializer
@@ -10324,6 +10360,8 @@ __m128 test_mm_mask_movehdup_ps(__m128 __W, __mmask8 __U, __m128 __A) {
   return _mm_mask_movehdup_ps(__W, __U, __A);
 }
 
+TEST_CONSTEXPR(match_m128(_mm_mask_movehdup_ps(_mm_setzero_ps(), 0xF, (__m128)(__v4sf){1.f,2.f,3.f,4.f}), 2.f,2.f,4.f,4.f));
+
 __m128 test_mm_maskz_movehdup_ps(__mmask8 __U, __m128 __A) {
   // CHECK-LABEL: test_mm_maskz_movehdup_ps
   // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
@@ -10338,6 +10376,8 @@ __m256 test_mm256_mask_movehdup_ps(__m256 __W, __mmask8 __U, __m256 __A) {
   return _mm256_mask_movehdup_ps(__W, __U, __A);
 }
 
+TEST_CONSTEXPR(match_m256(_mm256_mask_movehdup_ps(_mm256_setzero_ps(), 0xAA, (__m256)(__v8sf){1,2,3,4,5,6,7,8}), 0,2,0,4,0,6,0,8));
+
 __m256 test_mm256_maskz_movehdup_ps(__mmask8 __U, __m256 __A) {
   // CHECK-LABEL: test_mm256_maskz_movehdup_ps
   // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
@@ -10352,6 +10392,8 @@ __m128 test_mm_mask_moveldup_ps(__m128 __W, __mmask8 __U, __m128 __A) {
   return _mm_mask_moveldup_ps(__W, __U, __A);
 }
 
+TEST_CONSTEXPR(match_m128(_mm_mask_moveldup_ps(_mm_setzero_ps(), 0xF, (__m128)(__v4sf){1.f,2.f,3.f,4.f}), 1.f,1.f,3.f,3.f));
+
 __m128 test_mm_maskz_moveldup_ps(__mmask8 __U, __m128 __A) {
   // CHECK-LABEL: test_mm_maskz_moveldup_ps
   // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
@@ -10366,6 +10408,8 @@ __m256 test_mm256_mask_moveldup_ps(__m256 __W, __mmask8 __U, __m256 __A) {
   return _mm256_mask_moveldup_ps(__W, __U, __A);
 }
 
+TEST_CONSTEXPR(match_m256(_mm256_mask_moveldup_ps(_mm256_setzero_ps(), 0xAA, (__m256)(__v8sf){1,2,3,4,5,6,7,8}), 0,1,0,3,0,5,0,7));
+
 __m256 test_mm256_maskz_moveldup_ps(__mmask8 __U, __m256 __A) {
   // CHECK-LABEL: test_mm256_maskz_moveldup_ps
   // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
diff --git a/clang/test/CodeGen/X86/avx512vlbw-builtins.c b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
index c0e46de..d569283 100644
--- a/clang/test/CodeGen/X86/avx512vlbw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
@@ -1688,24 +1688,37 @@ __m128i test_mm_mask_shuffle_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m12
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_mask_shuffle_epi8(__W,__U,__A,__B);
 }
+
+TEST_CONSTEXPR(match_v16qi(_mm_mask_shuffle_epi8((__m128i)(__v16qi){1,1,1,1,1,1,1,1,2,2,4,4,6,6,8,8}, 0x00FF, (__m128i)(__v16qi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}, (__m128i)(__v16qi){15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0}), 15,14,13,12,11,10,9,8,2,2,4,4,6,6,8,8));
+
 __m128i test_mm_maskz_shuffle_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_maskz_shuffle_epi8
   // CHECK: @llvm.x86.ssse3.pshuf.b
   // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
   return _mm_maskz_shuffle_epi8(__U,__A,__B);
 }
+
+TEST_CONSTEXPR(match_v16qi(_mm_maskz_shuffle_epi8(0xAAAA, (__m128i)(__v16qi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}, (__m128i)(__v16qi){15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0}), 0,14,0,12,0,10,0,8,0,6,0,4,0,2,0,0));
+
 __m256i test_mm256_mask_shuffle_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_mask_shuffle_epi8
   // CHECK: @llvm.x86.avx2.pshuf.b
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_mask_shuffle_epi8(__W,__U,__A,__B);
 }
+
+TEST_CONSTEXPR(match_v32qi(_mm256_mask_shuffle_epi8((__m256i)(__v32qi){1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4}, 0x80808080, (__m256i)(__v32qi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31}, (__m256i)(__v32qi){31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0}), 1,1,1,1,1,1,1,8,2,2,2,2,2,2,2,0,3,3,3,3,3,3,3,24,4,4,4,4,4,4,4,16));
+
+
 __m256i test_mm256_maskz_shuffle_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: test_mm256_maskz_shuffle_epi8
   // CHECK: @llvm.x86.avx2.pshuf.b
   // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
   return _mm256_maskz_shuffle_epi8(__U,__A,__B);
 }
+
+TEST_CONSTEXPR(match_v32qi(_mm256_maskz_shuffle_epi8(0x0000FFFF, (__m256i)(__v32qi){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31}, (__m256i)(__v32qi){31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0}), 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0));
+
 __m128i test_mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: test_mm_mask_subs_epi8
   // CHECK: @llvm.ssub.sat.v16i8
diff --git a/clang/test/CodeGen/X86/avx512vlcd-builtins.c b/clang/test/CodeGen/X86/avx512vlcd-builtins.c
index 1619305..29fc6fd 100644
--- a/clang/test/CodeGen/X86/avx512vlcd-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlcd-builtins.c
@@ -20,6 +20,7 @@ __m128i test_mm_broadcastmb_epi64(__m128i a,__m128i b) {
   // CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 1
   return _mm_broadcastmb_epi64(_mm_cmpeq_epi32_mask (a, b));
 }
+TEST_CONSTEXPR(match_v2du(_mm_broadcastmb_epi64((__mmask8)(76)), 76, 76));
 
 __m256i test_mm256_broadcastmb_epi64(__m256i a, __m256i b) {
   // CHECK-LABEL: test_mm256_broadcastmb_epi64
@@ -32,6 +33,7 @@ __m256i test_mm256_broadcastmb_epi64(__m256i a, __m256i b) {
   // CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i32 3
   return _mm256_broadcastmb_epi64(_mm256_cmpeq_epi64_mask ( a, b));
 }
+TEST_CONSTEXPR(match_v4di(_mm256_broadcastmb_epi64((__mmask8)(67)), 67, 67, 67, 67));
 
 __m128i test_mm_broadcastmw_epi32(__m512i a, __m512i b) {
   // CHECK-LABEL: test_mm_broadcastmw_epi32
@@ -43,6 +45,7 @@ __m128i test_mm_broadcastmw_epi32(__m512i a, __m512i b) {
   // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 3
   return _mm_broadcastmw_epi32(_mm512_cmpeq_epi32_mask ( a, b));
 }
+TEST_CONSTEXPR(match_v4su(_mm_broadcastmw_epi32((__mmask16)(0xbabe)), 0xbabe, 0xbabe, 0xbabe, 0xbabe));
 
 __m256i test_mm256_broadcastmw_epi32(__m512i a, __m512i b) {
   // CHECK-LABEL: test_mm256_broadcastmw_epi32
@@ -58,87 +61,119 @@ __m256i test_mm256_broadcastmw_epi32(__m512i a, __m512i b) {
   // CHECK: insertelement <8 x i32> %{{.*}}, i32 %{{.*}}, i32 7
   return _mm256_broadcastmw_epi32(_mm512_cmpeq_epi32_mask ( a, b));
 }
+TEST_CONSTEXPR(match_v8si(_mm256_broadcastmw_epi32((__mmask16)(0xcafe)), 0xcafe,0xcafe,0xcafe,0xcafe, 0xcafe,0xcafe,0xcafe,0xcafe));
 
 __m128i test_mm_conflict_epi64(__m128i __A) {
   // CHECK-LABEL: test_mm_conflict_epi64
   // CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.conflict.q.128(<2 x i64> %{{.*}})
-  return _mm_conflict_epi64(__A);
+  return _mm_conflict_epi64(__A);
 }
+TEST_CONSTEXPR(match_v2di(_mm_conflict_epi64((__m128i)(__v2di){1, 2}), 0, 0));
+TEST_CONSTEXPR(match_v2di(_mm_conflict_epi64((__m128i)(__v2di){5, 5}), 0, 1));
+
 __m128i test_mm_mask_conflict_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
   // CHECK-LABEL: test_mm_mask_conflict_epi64
   // CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.conflict.q.128(<2 x i64> %{{.*}})
   // CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
-  return _mm_mask_conflict_epi64(__W, __U, __A);
+  return _mm_mask_conflict_epi64(__W, __U, __A);
 }
+TEST_CONSTEXPR(match_v2di(_mm_mask_conflict_epi64((__m128i)(__v2di){0xFF, 0xFF}, 0x2, (__m128i)(__v2di){5, 5}), 0xFF, 1));
+
 __m128i test_mm_maskz_conflict_epi64(__mmask8 __U, __m128i __A) {
   // CHECK-LABEL: test_mm_maskz_conflict_epi64
   // CHECK: call {{.*}}<2 x i64> @llvm.x86.avx512.conflict.q.128(<2 x i64> %{{.*}})
   // CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
-  return _mm_maskz_conflict_epi64(__U, __A);
+  return _mm_maskz_conflict_epi64(__U, __A);
 }
+TEST_CONSTEXPR(match_v2di(_mm_maskz_conflict_epi64(0x2, (__m128i)(__v2di){5, 5}), 0, 1));
+
 __m256i test_mm256_conflict_epi64(__m256i __A) {
   // CHECK-LABEL: test_mm256_conflict_epi64
   // CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.conflict.q.256(<4 x i64> %{{.*}})
-  return _mm256_conflict_epi64(__A);
+  return _mm256_conflict_epi64(__A);
 }
+TEST_CONSTEXPR(match_v4di(_mm256_conflict_epi64((__m256i)(__v4di){1, 2, 1, 3}), 0, 0, 1, 0));
+TEST_CONSTEXPR(match_v4di(_mm256_conflict_epi64((__m256i)(__v4di){7, 7, 7, 7}), 0, 1, 3, 7));
+TEST_CONSTEXPR(match_v4di(_mm256_conflict_epi64((__m256i)(__v4di){1, 2, 3, 4}), 0, 0, 0, 0));
+
 __m256i test_mm256_mask_conflict_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
   // CHECK-LABEL: test_mm256_mask_conflict_epi64
   // CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.conflict.q.256(<4 x i64> %{{.*}})
   // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
-  return _mm256_mask_conflict_epi64(__W, __U, __A);
+  return _mm256_mask_conflict_epi64(__W, __U, __A);
 }
+TEST_CONSTEXPR(match_v4di(_mm256_mask_conflict_epi64((__m256i)(__v4di){0xFF, 0xFF, 0xFF, 0xFF}, 0x5, (__m256i)(__v4di){1, 2, 1, 3}), 0, 0xFF, 1, 0xFF));
+
 __m256i test_mm256_maskz_conflict_epi64(__mmask8 __U, __m256i __A) {
   // CHECK-LABEL: test_mm256_maskz_conflict_epi64
   // CHECK: call {{.*}}<4 x i64> @llvm.x86.avx512.conflict.q.256(<4 x i64> %{{.*}})
   // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
-  return _mm256_maskz_conflict_epi64(__U, __A);
+  return _mm256_maskz_conflict_epi64(__U, __A);
 }
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_conflict_epi64(0x5, (__m256i)(__v4di){1, 2, 1, 3}), 0, 0, 1, 0));
+
 __m128i test_mm_conflict_epi32(__m128i __A) {
   // CHECK-LABEL: test_mm_conflict_epi32
   // CHECK: call <4 x i32> @llvm.x86.avx512.conflict.d.128(<4 x i32> %{{.*}})
-  return _mm_conflict_epi32(__A);
+  return _mm_conflict_epi32(__A);
 }
+TEST_CONSTEXPR(match_v4si(_mm_conflict_epi32((__m128i)(__v4si){1, 2, 1, 3}), 0, 0, 1, 0));
+TEST_CONSTEXPR(match_v4si(_mm_conflict_epi32((__m128i)(__v4si){3, 3, 3, 3}), 0, 1, 3, 7));
+TEST_CONSTEXPR(match_v4si(_mm_conflict_epi32((__m128i)(__v4si){1, 2, 3, 4}), 0, 0, 0, 0));
+
 __m128i test_mm_mask_conflict_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
   // CHECK-LABEL: test_mm_mask_conflict_epi32
   // CHECK: call <4 x i32> @llvm.x86.avx512.conflict.d.128(<4 x i32> %{{.*}})
   // CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
-  return _mm_mask_conflict_epi32(__W, __U, __A);
+  return _mm_mask_conflict_epi32(__W, __U, __A);
 }
+TEST_CONSTEXPR(match_v4si(_mm_mask_conflict_epi32((__m128i)(__v4si){0xFF, 0xFF, 0xFF, 0xFF}, 0x5, (__m128i)(__v4si){1, 2, 1, 3}), 0, 0xFF, 1, 0xFF));
+
 __m128i test_mm_maskz_conflict_epi32(__mmask8 __U, __m128i __A) {
   // CHECK-LABEL: test_mm_maskz_conflict_epi32
   // CHECK: call <4 x i32> @llvm.x86.avx512.conflict.d.128(<4 x i32> %{{.*}})
   // CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
-  return _mm_maskz_conflict_epi32(__U, __A);
+  return _mm_maskz_conflict_epi32(__U, __A);
 }
+TEST_CONSTEXPR(match_v4si(_mm_maskz_conflict_epi32(0x5, (__m128i)(__v4si){1, 2, 1, 3}), 0, 0, 1, 0));
+
 __m256i test_mm256_conflict_epi32(__m256i __A) {
   // CHECK-LABEL: test_mm256_conflict_epi32
   // CHECK: call <8 x i32> @llvm.x86.avx512.conflict.d.256(<8 x i32> %{{.*}})
-  return _mm256_conflict_epi32(__A);
+  return _mm256_conflict_epi32(__A);
 }
+TEST_CONSTEXPR(match_v8si(_mm256_conflict_epi32((__m256i)(__v8si){1, 2, 1, 3, 2, 4, 1, 5}), 0, 0, 1, 0, 2, 0, 5, 0));
+TEST_CONSTEXPR(match_v8si(_mm256_conflict_epi32((__m256i)(__v8si){4, 4, 4, 4, 4, 4, 4, 4}), 0, 1, 3, 7, 15, 31, 63, 127));
+TEST_CONSTEXPR(match_v8si(_mm256_conflict_epi32((__m256i)(__v8si){1, 2, 3, 4, 5, 6, 7, 8}), 0, 0, 0, 0, 0, 0, 0, 0));
+
 __m256i test_mm256_mask_conflict_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
   // CHECK-LABEL: test_mm256_mask_conflict_epi32
   // CHECK: call <8 x i32> @llvm.x86.avx512.conflict.d.256(<8 x i32> %{{.*}})
   // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
-  return _mm256_mask_conflict_epi32(__W, __U, __A);
+  return _mm256_mask_conflict_epi32(__W, __U, __A);
 }
+TEST_CONSTEXPR(match_v8si(_mm256_mask_conflict_epi32((__m256i)(__v8si){0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, /*0101 0101=*/0x55, (__m256i)(__v8si){1, 2, 1, 3, 2, 4, 1, 5}), 0, 0xFF, 1, 0xFF, 2, 0xFF, 5, 0xFF));
+
 __m256i test_mm256_maskz_conflict_epi32(__mmask8 __U, __m256i __A) {
   // CHECK-LABEL: test_mm256_maskz_conflict_epi32
   // CHECK: call <8 x i32> @llvm.x86.avx512.conflict.d.256(<8 x i32> %{{.*}})
   // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
-  return _mm256_maskz_conflict_epi32(__U, __A);
+  return _mm256_maskz_conflict_epi32(__U, __A);
 }
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_conflict_epi32(0x55, (__m256i)(__v8si){1, 2, 1, 3, 2, 4, 1, 5}), 0, 0, 1, 0, 2, 0, 5, 0));
+
 __m128i test_mm_lzcnt_epi32(__m128i __A) {
   // CHECK-LABEL: test_mm_lzcnt_epi32
   // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.*}}, i1 true)
diff --git a/clang/test/CodeGen/X86/avx512vldq-builtins.c b/clang/test/CodeGen/X86/avx512vldq-builtins.c
index 9388457..4773b60 100644
--- a/clang/test/CodeGen/X86/avx512vldq-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vldq-builtins.c
@@ -987,6 +987,8 @@ __m256 test_mm256_mask_broadcast_f32x2(__m256 __O, __mmask8 __M, __m128 __A) {
   return _mm256_mask_broadcast_f32x2(__O, __M, __A);
 }
 
+TEST_CONSTEXPR(match_m256(_mm256_mask_broadcast_f32x2(_mm256_setzero_ps(), 0xAA, (__m128)(__v4sf){1.f,2.f,3.f,4.f}), 0,2.f,0,2.f,0,2.f,0,2.f));
+
 __m256 test_mm256_maskz_broadcast_f32x2(__mmask8 __M, __m128 __A) {
   // CHECK-LABEL: test_mm256_maskz_broadcast_f32x2
   // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -994,6 +996,8 @@ __m256 test_mm256_maskz_broadcast_f32x2(__mmask8 __M, __m128 __A) {
   return _mm256_maskz_broadcast_f32x2(__M, __A);
 }
 
+TEST_CONSTEXPR(match_m256(_mm256_maskz_broadcast_f32x2(0xAA, (__m128)(__v4sf){1.f,2.f,3.f,4.f}), 0,2.f,0,2.f,0,2.f,0,2.f));
+
 __m256d test_mm256_broadcast_f64x2(double const* __A) {
   // CHECK-LABEL: test_mm256_broadcast_f64x2
   // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1008,6 +1012,8 @@ __m256d test_mm256_mask_broadcast_f64x2(__m256d __O, __mmask8 __M, double const*
   return _mm256_mask_broadcast_f64x2(__O, __M, _mm_loadu_pd(__A));
 }
 
+TEST_CONSTEXPR(match_m256d(_mm256_mask_broadcast_f64x2(_mm256_setzero_pd(), 0xA, (__m128d)(__v2df){1.0,2.0}), 0,2.0,0,2.0));
+
 __m256d test_mm256_maskz_broadcast_f64x2(__mmask8 __M, double const* __A) {
   // CHECK-LABEL: test_mm256_maskz_broadcast_f64x2
   // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1015,6 +1021,8 @@ __m256d test_mm256_maskz_broadcast_f64x2(__mmask8 __M, double const* __A) {
   return _mm256_maskz_broadcast_f64x2(__M, _mm_loadu_pd(__A));
 }
 
+TEST_CONSTEXPR(match_m256d(_mm256_maskz_broadcast_f64x2(0xA, (__m128d)(__v2df){1.0,2.0}), 0,2.0,0,2.0));
+
 __m128i test_mm_broadcast_i32x2(__m128i __A) {
   // CHECK-LABEL: test_mm_broadcast_i32x2
   // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1029,6 +1037,8 @@ __m128i test_mm_mask_broadcast_i32x2(__m128i __O, __mmask8 __M, __m128i __A) {
   return _mm_mask_broadcast_i32x2(__O, __M, __A);
 }
 
+TEST_CONSTEXPR(match_v4si(_mm_mask_broadcast_i32x2(_mm_setzero_si128(), 0xF, (__m128i)(__v4si){0,1,2,3}), 0,1,0,1));
+
 __m128i test_mm_maskz_broadcast_i32x2(__mmask8 __M, __m128i __A) {
   // CHECK-LABEL: test_mm_maskz_broadcast_i32x2
   // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1036,6 +1046,8 @@ __m128i test_mm_maskz_broadcast_i32x2(__mmask8 __M, __m128i __A) {
   return _mm_maskz_broadcast_i32x2(__M, __A);
 }
 
+TEST_CONSTEXPR(match_v4si(_mm_maskz_broadcast_i32x2(0xF, (__m128i)(__v4si){0,1,2,3}), 0,1,0,1));
+
 __m256i test_mm256_broadcast_i32x2(__m128i __A) {
   // CHECK-LABEL: test_mm256_broadcast_i32x2
   // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -1050,6 +1062,8 @@ __m256i test_mm256_mask_broadcast_i32x2(__m256i __O, __mmask8 __M, __m128i __A)
   return _mm256_mask_broadcast_i32x2(__O, __M, __A);
 }
 
+TEST_CONSTEXPR(match_v8si(_mm256_mask_broadcast_i32x2(_mm256_setzero_si256(), 0xAA, (__m128i)(__v4si){0,1,2,3}), 0,1,0,1,0,1,0,1));
+
 __m256i test_mm256_maskz_broadcast_i32x2(__mmask8 __M, __m128i __A) {
   // CHECK-LABEL: test_mm256_maskz_broadcast_i32x2
   // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@@ -1057,6 +1071,8 @@ __m256i test_mm256_maskz_broadcast_i32x2(__mmask8 __M, __m128i __A) {
   return _mm256_maskz_broadcast_i32x2(__M, __A);
 }
 
+TEST_CONSTEXPR(match_v8si(_mm256_maskz_broadcast_i32x2(0xAA, (__m128i)(__v4si){0,1,2,3}), 0,1,0,1,0,1,0,1));
+
 __m256i test_mm256_broadcast_i64x2(__m128i const* __A) {
   // CHECK-LABEL: test_mm256_broadcast_i64x2
   // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1078,6 +1094,8 @@ __m256i test_mm256_maskz_broadcast_i64x2(__mmask8 __M, __m128i const* __A) {
   return _mm256_maskz_broadcast_i64x2(__M, _mm_loadu_si128(__A));
 }
 
+TEST_CONSTEXPR(match_v4di(_mm256_maskz_broadcast_i64x2(0xF, (__m128i)(__v2di){1,2}), 1,2,1,2));
+
 __m128d test_mm256_extractf64x2_pd(__m256d __A) {
   // CHECK-LABEL: test_mm256_extractf64x2_pd
   // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 2, i32 3>
diff --git a/clang/test/CodeGen/X86/mmx-builtins.c b/clang/test/CodeGen/X86/mmx-builtins.c
index a1e05a1..d9041d4 100644
--- a/clang/test/CodeGen/X86/mmx-builtins.c
+++ b/clang/test/CodeGen/X86/mmx-builtins.c
@@ -589,6 +589,8 @@ __m64 test_mm_shuffle_pi8(__m64 a, __m64 b) {
   return _mm_shuffle_pi8(a, b);
 }
 
+TEST_CONSTEXPR(match_v8qi(_mm_shuffle_pi8((__m64)(__v8qi){0,1,2,3,4,5,6,7}, (__m64)(__v8qi){10,20,30,40,50,60,70,80}), 2,4,6,0,2,4,6,0));
+
 __m64 test_mm_shuffle_pi16(__m64 a) {
   // CHECK-LABEL: test_mm_shuffle_pi16
   // CHECK: shufflevector <4 x i16> {{%.*}}, <4 x i16> {{%.*}}, <4 x i32> <i32 3, i32 0, i32 0, i32 0>
diff --git a/clang/test/CodeGen/X86/ssse3-builtins.c b/clang/test/CodeGen/X86/ssse3-builtins.c
index e623599..32abd9d 100644
--- a/clang/test/CodeGen/X86/ssse3-builtins.c
+++ b/clang/test/CodeGen/X86/ssse3-builtins.c
@@ -117,6 +117,8 @@ __m128i test_mm_shuffle_epi8(__m128i a, __m128i b) {
   return _mm_shuffle_epi8(a, b);
 }
 
+TEST_CONSTEXPR(match_v16qi(_mm_shuffle_epi8((__m128i)(__v16qs){0,-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15}, (__m128i)(__v16qs){15,-14,13,-12,11,-10,9,-8,7,-6,5,-4,3,-2,1,0}), -15,0,-13,0,-11,0,-9,0,-7,0,-5,0,-3,0,-1,0));
+
 __m128i test_mm_sign_epi8(__m128i a, __m128i b) {
   // CHECK-LABEL: test_mm_sign_epi8
   // CHECK: call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %{{.*}}, <16 x i8> %{{.*}})
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/load-store.c b/clang/test/CodeGen/arm-mve-intrinsics/load-store.c
index 2dde75f..50f7011 100644
--- a/clang/test/CodeGen/arm-mve-intrinsics/load-store.c
+++ b/clang/test/CodeGen/arm-mve-intrinsics/load-store.c
@@ -8,8 +8,8 @@
 
 // CHECK-LABEL: @test_vld1q_f16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x half>, ptr [[BASE:%.*]], align 2
-// CHECK-NEXT:    ret <8 x half> [[TMP1]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x half>, ptr [[BASE:%.*]], align 2
+// CHECK-NEXT:    ret <8 x half> [[TMP0]]
 //
 float16x8_t test_vld1q_f16(const float16_t *base)
 {
@@ -22,8 +22,8 @@ float16x8_t test_vld1q_f16(const float16_t *base)
 
 // CHECK-LABEL: @test_vld1q_f32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[BASE:%.*]], align 4
-// CHECK-NEXT:    ret <4 x float> [[TMP1]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[BASE:%.*]], align 4
+// CHECK-NEXT:    ret <4 x float> [[TMP0]]
 //
 float32x4_t test_vld1q_f32(const float32_t *base)
 {
@@ -36,8 +36,8 @@ float32x4_t test_vld1q_f32(const float32_t *base)
 
 // CHECK-LABEL: @test_vld1q_s8(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr [[BASE:%.*]], align 1
-// CHECK-NEXT:    ret <16 x i8> [[TMP1]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[BASE:%.*]], align 1
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
 //
 int8x16_t test_vld1q_s8(const int8_t *base)
 {
@@ -50,8 +50,8 @@ int8x16_t test_vld1q_s8(const int8_t *base)
 
 // CHECK-LABEL: @test_vld1q_s16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2
-// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
 //
 int16x8_t test_vld1q_s16(const int16_t *base)
 {
@@ -64,8 +64,8 @@ int16x8_t test_vld1q_s16(const int16_t *base)
 
 // CHECK-LABEL: @test_vld1q_s32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[BASE:%.*]], align 4
-// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[BASE:%.*]], align 4
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
 //
 int32x4_t test_vld1q_s32(const int32_t *base)
 {
@@ -78,8 +78,8 @@ int32x4_t test_vld1q_s32(const int32_t *base)
 
 // CHECK-LABEL: @test_vld1q_u8(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr [[BASE:%.*]], align 1
-// CHECK-NEXT:    ret <16 x i8> [[TMP1]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[BASE:%.*]], align 1
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
 //
 uint8x16_t test_vld1q_u8(const uint8_t *base)
 {
@@ -92,8 +92,8 @@ uint8x16_t test_vld1q_u8(const uint8_t *base)
 
 // CHECK-LABEL: @test_vld1q_u16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2
-// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
 //
 uint16x8_t test_vld1q_u16(const uint16_t *base)
 {
@@ -106,8 +106,8 @@ uint16x8_t test_vld1q_u16(const uint16_t *base)
 
 // CHECK-LABEL: @test_vld1q_u32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[BASE:%.*]], align 4
-// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[BASE:%.*]], align 4
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
 //
 uint32x4_t test_vld1q_u32(const uint32_t *base)
 {
@@ -120,10 +120,10 @@ uint32x4_t test_vld1q_u32(const uint32_t *base)
 
 // CHECK-LABEL: @test_vld1q_z_f16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT:    [[TMP3:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]], <8 x half> zeroinitializer)
-// CHECK-NEXT:    ret <8 x half> [[TMP3]]
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x half> zeroinitializer)
+// CHECK-NEXT:    ret <8 x half> [[TMP2]]
 //
 float16x8_t test_vld1q_z_f16(const float16_t *base, mve_pred16_t p)
 {
@@ -136,10 +136,10 @@ float16x8_t test_vld1q_z_f16(const float16_t *base, mve_pred16_t p)
 
 // CHECK-LABEL: @test_vld1q_z_f32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT:    [[TMP3:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]], <4 x float> zeroinitializer)
-// CHECK-NEXT:    ret <4 x float> [[TMP3]]
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x float> zeroinitializer)
+// CHECK-NEXT:    ret <4 x float> [[TMP2]]
 //
 float32x4_t test_vld1q_z_f32(const float32_t *base, mve_pred16_t p)
 {
@@ -152,10 +152,10 @@ float32x4_t test_vld1q_z_f32(const float32_t *base, mve_pred16_t p)
 
 // CHECK-LABEL: @test_vld1q_z_s8(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
-// CHECK-NEXT:    [[TMP3:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP2]], <16 x i8> zeroinitializer)
-// CHECK-NEXT:    ret <16 x i8> [[TMP3]]
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
+// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
 //
 int8x16_t test_vld1q_z_s8(const int8_t *base, mve_pred16_t p)
 {
@@ -168,10 +168,10 @@ int8x16_t test_vld1q_z_s8(const int8_t *base, mve_pred16_t p)
 
 // CHECK-LABEL: @test_vld1q_z_s16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT:    [[TMP3:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]], <8 x i16> zeroinitializer)
-// CHECK-NEXT:    ret <8 x i16> [[TMP3]]
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
 //
 int16x8_t test_vld1q_z_s16(const int16_t *base, mve_pred16_t p)
 {
@@ -184,10 +184,10 @@ int16x8_t test_vld1q_z_s16(const int16_t *base, mve_pred16_t p)
 
 // CHECK-LABEL: @test_vld1q_z_s32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT:    [[TMP3:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]], <4 x i32> zeroinitializer)
-// CHECK-NEXT:    ret <4 x i32> [[TMP3]]
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
 //
 int32x4_t test_vld1q_z_s32(const int32_t *base, mve_pred16_t p)
 {
@@ -200,10 +200,10 @@ int32x4_t test_vld1q_z_s32(const int32_t *base, mve_pred16_t p)
 
 // CHECK-LABEL: @test_vld1q_z_u8(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
-// CHECK-NEXT:    [[TMP3:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP2]], <16 x i8> zeroinitializer)
-// CHECK-NEXT:    ret <16 x i8> [[TMP3]]
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
+// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
 //
 uint8x16_t test_vld1q_z_u8(const uint8_t *base, mve_pred16_t p)
 {
@@ -216,10 +216,10 @@ uint8x16_t test_vld1q_z_u8(const uint8_t *base, mve_pred16_t p)
 
 // CHECK-LABEL: @test_vld1q_z_u16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT:    [[TMP3:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]], <8 x i16> zeroinitializer)
-// CHECK-NEXT:    ret <8 x i16> [[TMP3]]
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
 //
 uint16x8_t test_vld1q_z_u16(const uint16_t *base, mve_pred16_t p)
 {
@@ -232,10 +232,10 @@ uint16x8_t test_vld1q_z_u16(const uint16_t *base, mve_pred16_t p)
 
 // CHECK-LABEL: @test_vld1q_z_u32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT:    [[TMP3:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]], <4 x i32> zeroinitializer)
-// CHECK-NEXT:    ret <4 x i32> [[TMP3]]
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
 //
 uint32x4_t test_vld1q_z_u32(const uint32_t *base, mve_pred16_t p)
 {
@@ -248,8 +248,8 @@ uint32x4_t test_vld1q_z_u32(const uint32_t *base, mve_pred16_t p)
 
 // CHECK-LABEL: @test_vldrbq_s8(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr [[BASE:%.*]], align 1
-// CHECK-NEXT:    ret <16 x i8> [[TMP1]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[BASE:%.*]], align 1
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
 //
 int8x16_t test_vldrbq_s8(const int8_t *base)
 {
@@ -258,9 +258,9 @@ int8x16_t test_vldrbq_s8(const int8_t *base)
 
 // CHECK-LABEL: @test_vldrbq_s16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr [[BASE:%.*]], align 1
-// CHECK-NEXT:    [[TMP2:%.*]] = sext <8 x i8> [[TMP1]] to <8 x i16>
-// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[BASE:%.*]], align 1
+// CHECK-NEXT:    [[TMP1:%.*]] = sext <8 x i8> [[TMP0]] to <8 x i16>
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
 //
 int16x8_t test_vldrbq_s16(const int8_t *base)
 {
@@ -269,9 +269,9 @@ int16x8_t test_vldrbq_s16(const int8_t *base)
 
 // CHECK-LABEL: @test_vldrbq_s32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[BASE:%.*]], align 1
-// CHECK-NEXT:    [[TMP2:%.*]] = sext <4 x i8> [[TMP1]] to <4 x i32>
-// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr [[BASE:%.*]], align 1
+// CHECK-NEXT:    [[TMP1:%.*]] = sext <4 x i8> [[TMP0]] to <4 x i32>
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
 //
 int32x4_t test_vldrbq_s32(const int8_t *base)
 {
@@ -280,8 +280,8 @@ int32x4_t test_vldrbq_s32(const int8_t *base)
 
 // CHECK-LABEL: @test_vldrbq_u8(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr [[BASE:%.*]], align 1
-// CHECK-NEXT:    ret <16 x i8> [[TMP1]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[BASE:%.*]], align 1
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
 //
 uint8x16_t test_vldrbq_u8(const uint8_t *base)
 {
@@ -290,9 +290,9 @@ uint8x16_t test_vldrbq_u8(const uint8_t *base)
 
 // CHECK-LABEL: @test_vldrbq_u16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr [[BASE:%.*]], align 1
-// CHECK-NEXT:    [[TMP2:%.*]] = zext <8 x i8> [[TMP1]] to <8 x i16>
-// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[BASE:%.*]], align 1
+// CHECK-NEXT:    [[TMP1:%.*]] = zext <8 x i8> [[TMP0]] to <8 x i16>
+// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
 //
 uint16x8_t test_vldrbq_u16(const uint8_t *base)
 {
@@ -301,9 +301,9 @@ uint16x8_t test_vldrbq_u16(const uint8_t *base)
 
 // CHECK-LABEL: @test_vldrbq_u32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[BASE:%.*]], align 1
-// CHECK-NEXT:    [[TMP2:%.*]] = zext <4 x i8> [[TMP1]] to <4 x i32>
-// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i8>, ptr [[BASE:%.*]], align 1
+// CHECK-NEXT:    [[TMP1:%.*]] = zext <4 x i8> [[TMP0]] to <4 x i32>
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
 //
 uint32x4_t test_vldrbq_u32(const uint8_t *base)
 {
@@ -312,10 +312,10 @@ uint32x4_t test_vldrbq_u32(const uint8_t *base)
 
 // CHECK-LABEL: @test_vldrbq_z_s8(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
-// CHECK-NEXT:    [[TMP3:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP2]], <16 x i8> zeroinitializer)
-// CHECK-NEXT:    ret <16 x i8> [[TMP3]]
+// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
+// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
+// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
 //
 int8x16_t test_vldrbq_z_s8(const int8_t *base, mve_pred16_t p)
 {
@@ -324,11 +324,11 @@ int8x16_t test_vldrbq_z_s8(const int8_t *base, mve_pred16_t p)
 
 // CHECK-LABEL: @test_vldrbq_z_s16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
-//
CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) -// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP2]], <4 x i8> zeroinitializer) -// CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i8> [[TMP3]] to <4 x i32> -// CHECK-NEXT: ret <4 x i32> [[TMP4]] +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP1]], <4 x i8> zeroinitializer) +// CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i8> [[TMP2]] to <4 x i32> +// CHECK-NEXT: ret <4 x i32> [[TMP3]] // int32x4_t test_vldrbq_z_s32(const int8_t *base, mve_pred16_t p) { @@ -350,10 +350,10 @@ int32x4_t test_vldrbq_z_s32(const int8_t *base, mve_pred16_t p) // CHECK-LABEL: @test_vldrbq_z_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]]) -// CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP2]], <16 x i8> zeroinitializer) -// CHECK-NEXT: ret <16 x i8> [[TMP3]] +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> zeroinitializer) +// CHECK-NEXT: ret <16 x i8> [[TMP2]] // uint8x16_t test_vldrbq_z_u8(const uint8_t *base, mve_pred16_t p) { @@ -362,11 +362,11 @@ uint8x16_t test_vldrbq_z_u8(const uint8_t *base, mve_pred16_t p) // CHECK-LABEL: @test_vldrbq_z_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]]) -// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP2]], <8 x i8> zeroinitializer) -// CHECK-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[TMP3]] to <8 x i16> -// CHECK-NEXT: ret <8 x i16> [[TMP4]] +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP1]], <8 x i8> zeroinitializer) +// CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[TMP2]] to <8 x i16> +// CHECK-NEXT: ret <8 x i16> [[TMP3]] // uint16x8_t test_vldrbq_z_u16(const uint8_t *base, mve_pred16_t p) { @@ -375,11 +375,11 @@ uint16x8_t test_vldrbq_z_u16(const uint8_t *base, mve_pred16_t p) // CHECK-LABEL: @test_vldrbq_z_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) -// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP2]], <4 x i8> zeroinitializer) -// CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i8> [[TMP3]] to <4 x i32> -// CHECK-NEXT: ret <4 x i32> [[TMP4]] +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP1]], <4 x i8> zeroinitializer) +// CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[TMP2]] to <4 x i32> +// 
CHECK-NEXT: ret <4 x i32> [[TMP3]] // uint32x4_t test_vldrbq_z_u32(const uint8_t *base, mve_pred16_t p) { @@ -388,8 +388,8 @@ uint32x4_t test_vldrbq_z_u32(const uint8_t *base, mve_pred16_t p) // CHECK-LABEL: @test_vldrhq_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = load <8 x half>, ptr [[BASE:%.*]], align 2 -// CHECK-NEXT: ret <8 x half> [[TMP1]] +// CHECK-NEXT: [[TMP0:%.*]] = load <8 x half>, ptr [[BASE:%.*]], align 2 +// CHECK-NEXT: ret <8 x half> [[TMP0]] // float16x8_t test_vldrhq_f16(const float16_t *base) { @@ -398,8 +398,8 @@ float16x8_t test_vldrhq_f16(const float16_t *base) // CHECK-LABEL: @test_vldrhq_s16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2 -// CHECK-NEXT: ret <8 x i16> [[TMP1]] +// CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2 +// CHECK-NEXT: ret <8 x i16> [[TMP0]] // int16x8_t test_vldrhq_s16(const int16_t *base) { @@ -408,9 +408,9 @@ int16x8_t test_vldrhq_s16(const int16_t *base) // CHECK-LABEL: @test_vldrhq_s32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr [[BASE:%.*]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = sext <4 x i16> [[TMP1]] to <4 x i32> -// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr [[BASE:%.*]], align 2 +// CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i16> [[TMP0]] to <4 x i32> +// CHECK-NEXT: ret <4 x i32> [[TMP1]] // int32x4_t test_vldrhq_s32(const int16_t *base) { @@ -419,8 +419,8 @@ int32x4_t test_vldrhq_s32(const int16_t *base) // CHECK-LABEL: @test_vldrhq_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2 -// CHECK-NEXT: ret <8 x i16> [[TMP1]] +// CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[BASE:%.*]], align 2 +// CHECK-NEXT: ret <8 x i16> [[TMP0]] // uint16x8_t test_vldrhq_u16(const uint16_t *base) { @@ -429,9 +429,9 @@ uint16x8_t test_vldrhq_u16(const uint16_t *base) // CHECK-LABEL: @test_vldrhq_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr [[BASE:%.*]], align 2 -// CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32> -// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr [[BASE:%.*]], align 2 +// CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i16> [[TMP0]] to <4 x i32> +// CHECK-NEXT: ret <4 x i32> [[TMP1]] // uint32x4_t test_vldrhq_u32(const uint16_t *base) { @@ -440,10 +440,10 @@ uint32x4_t test_vldrhq_u32(const uint16_t *base) // CHECK-LABEL: @test_vldrhq_z_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]]) -// CHECK-NEXT: [[TMP3:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]], <8 x half> zeroinitializer) -// CHECK-NEXT: ret <8 x half> [[TMP3]] +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x half> zeroinitializer) +// CHECK-NEXT: ret <8 x half> [[TMP2]] // float16x8_t test_vldrhq_z_f16(const float16_t *base, mve_pred16_t p) { @@ -452,10 +452,10 @@ float16x8_t test_vldrhq_z_f16(const float16_t *base, mve_pred16_t p) // CHECK-LABEL: @test_vldrhq_z_s16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> 
@llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]]) -// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]], <8 x i16> zeroinitializer) -// CHECK-NEXT: ret <8 x i16> [[TMP3]] +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x i16> zeroinitializer) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] // int16x8_t test_vldrhq_z_s16(const int16_t *base, mve_pred16_t p) { @@ -464,11 +464,11 @@ int16x8_t test_vldrhq_z_s16(const int16_t *base, mve_pred16_t p) // CHECK-LABEL: @test_vldrhq_z_s32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) -// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP2]], <4 x i16> zeroinitializer) -// CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i16> [[TMP3]] to <4 x i32> -// CHECK-NEXT: ret <4 x i32> [[TMP4]] +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP1]], <4 x i16> zeroinitializer) +// CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i16> [[TMP2]] to <4 x i32> +// CHECK-NEXT: ret <4 x i32> [[TMP3]] // int32x4_t test_vldrhq_z_s32(const int16_t *base, mve_pred16_t p) { @@ -477,10 +477,10 @@ int32x4_t test_vldrhq_z_s32(const int16_t *base, mve_pred16_t p) // CHECK-LABEL: @test_vldrhq_z_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]]) -// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]], <8 x i16> zeroinitializer) -// CHECK-NEXT: ret <8 x i16> [[TMP3]] +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x i16> zeroinitializer) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] // uint16x8_t test_vldrhq_z_u16(const uint16_t *base, mve_pred16_t p) { @@ -489,11 +489,11 @@ uint16x8_t test_vldrhq_z_u16(const uint16_t *base, mve_pred16_t p) // CHECK-LABEL: @test_vldrhq_z_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) -// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP2]], <4 x i16> zeroinitializer) -// CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i16> [[TMP3]] to <4 x i32> -// CHECK-NEXT: ret <4 x i32> [[TMP4]] +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP1]], <4 x i16> zeroinitializer) +// CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[TMP2]] to <4 x i32> +// CHECK-NEXT: ret <4 x i32> [[TMP3]] // uint32x4_t test_vldrhq_z_u32(const uint16_t *base, mve_pred16_t p) { @@ -502,8 +502,8 @@ uint32x4_t 
test_vldrhq_z_u32(const uint16_t *base, mve_pred16_t p) // CHECK-LABEL: @test_vldrwq_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[BASE:%.*]], align 4 -// CHECK-NEXT: ret <4 x float> [[TMP1]] +// CHECK-NEXT: [[TMP0:%.*]] = load <4 x float>, ptr [[BASE:%.*]], align 4 +// CHECK-NEXT: ret <4 x float> [[TMP0]] // float32x4_t test_vldrwq_f32(const float32_t *base) { @@ -512,8 +512,8 @@ float32x4_t test_vldrwq_f32(const float32_t *base) // CHECK-LABEL: @test_vldrwq_s32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[BASE:%.*]], align 4 -// CHECK-NEXT: ret <4 x i32> [[TMP1]] +// CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr [[BASE:%.*]], align 4 +// CHECK-NEXT: ret <4 x i32> [[TMP0]] // int32x4_t test_vldrwq_s32(const int32_t *base) { @@ -522,8 +522,8 @@ int32x4_t test_vldrwq_s32(const int32_t *base) // CHECK-LABEL: @test_vldrwq_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[BASE:%.*]], align 4 -// CHECK-NEXT: ret <4 x i32> [[TMP1]] +// CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr [[BASE:%.*]], align 4 +// CHECK-NEXT: ret <4 x i32> [[TMP0]] // uint32x4_t test_vldrwq_u32(const uint32_t *base) { @@ -532,10 +532,10 @@ uint32x4_t test_vldrwq_u32(const uint32_t *base) // CHECK-LABEL: @test_vldrwq_z_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) -// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]], <4 x float> zeroinitializer) -// CHECK-NEXT: ret <4 x float> [[TMP3]] +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x float> zeroinitializer) +// CHECK-NEXT: ret <4 x float> [[TMP2]] // float32x4_t test_vldrwq_z_f32(const float32_t *base, mve_pred16_t p) { @@ -544,10 +544,10 @@ float32x4_t test_vldrwq_z_f32(const float32_t *base, mve_pred16_t p) // CHECK-LABEL: @test_vldrwq_z_s32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) -// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]], <4 x i32> zeroinitializer) -// CHECK-NEXT: ret <4 x i32> [[TMP3]] +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x i32> zeroinitializer) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] // int32x4_t test_vldrwq_z_s32(const int32_t *base, mve_pred16_t p) { @@ -556,10 +556,10 @@ int32x4_t test_vldrwq_z_s32(const int32_t *base, mve_pred16_t p) // CHECK-LABEL: @test_vldrwq_z_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) -// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]], <4 x i32> zeroinitializer) -// CHECK-NEXT: ret <4 x i32> [[TMP3]] +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 
[[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x i32> zeroinitializer) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] // uint32x4_t test_vldrwq_z_u32(const uint32_t *base, mve_pred16_t p) { @@ -680,9 +680,9 @@ void test_vst1q_u32(uint32_t *base, uint32x4_t value) // CHECK-LABEL: @test_vst1q_p_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]]) -// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0(<8 x half> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0(<8 x half> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]]) // CHECK-NEXT: ret void // void test_vst1q_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p) @@ -696,9 +696,9 @@ void test_vst1q_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p) // CHECK-LABEL: @test_vst1q_p_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) -// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]]) // CHECK-NEXT: ret void // void test_vst1q_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p) @@ -712,9 +712,9 @@ void test_vst1q_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p) // CHECK-LABEL: @test_vst1q_p_s8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]]) -// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]]) // CHECK-NEXT: ret void // void test_vst1q_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p) @@ -728,9 +728,9 @@ void test_vst1q_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p) // CHECK-LABEL: @test_vst1q_p_s16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]]) -// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]]) // CHECK-NEXT: ret void // void test_vst1q_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p) @@ -744,9 +744,9 @@ void test_vst1q_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p) // CHECK-LABEL: @test_vst1q_p_s32( // CHECK-NEXT: entry: -// CHECK-NEXT: 
[[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) -// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]]) // CHECK-NEXT: ret void // void test_vst1q_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p) @@ -760,9 +760,9 @@ void test_vst1q_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p) // CHECK-LABEL: @test_vst1q_p_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]]) -// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]]) // CHECK-NEXT: ret void // void test_vst1q_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p) @@ -776,9 +776,9 @@ void test_vst1q_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p) // CHECK-LABEL: @test_vst1q_p_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]]) -// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]]) // CHECK-NEXT: ret void // void test_vst1q_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p) @@ -792,9 +792,9 @@ void test_vst1q_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p) // CHECK-LABEL: @test_vst1q_p_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) -// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]]) // CHECK-NEXT: ret void // void test_vst1q_p_u32(uint32_t *base, uint32x4_t value, mve_pred16_t p) @@ -896,9 +896,9 @@ void test_vstrbq_u32(uint8_t *base, uint32x4_t value) // CHECK-LABEL: @test_vstrbq_p_s8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]]) -// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: call void 
@llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]]) // CHECK-NEXT: ret void // void test_vstrbq_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p) @@ -913,9 +913,9 @@ void test_vstrbq_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p) // CHECK-LABEL: @test_vstrbq_p_s16( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = trunc <8 x i16> [[VALUE:%.*]] to <8 x i8> -// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]]) -// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0(<8 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]]) +// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0(<8 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP2]]) // CHECK-NEXT: ret void // void test_vstrbq_p_s16(int8_t *base, int16x8_t value, mve_pred16_t p) @@ -930,9 +930,9 @@ void test_vstrbq_p_s16(int8_t *base, int16x8_t value, mve_pred16_t p) // CHECK-LABEL: @test_vstrbq_p_s32( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i8> -// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]]) -// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0(<4 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) +// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0(<4 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP2]]) // CHECK-NEXT: ret void // void test_vstrbq_p_s32(int8_t *base, int32x4_t value, mve_pred16_t p) @@ -946,9 +946,9 @@ void test_vstrbq_p_s32(int8_t *base, int32x4_t value, mve_pred16_t p) // CHECK-LABEL: @test_vstrbq_p_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]]) -// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]]) +// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]]) // CHECK-NEXT: ret void // void test_vstrbq_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p) @@ -963,9 +963,9 @@ void test_vstrbq_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p) // CHECK-LABEL: @test_vstrbq_p_u16( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = trunc <8 x i16> [[VALUE:%.*]] to <8 x i8> -// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]]) -// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0(<8 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]]) +// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0(<8 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP2]]) // CHECK-NEXT: ret void // void test_vstrbq_p_u16(uint8_t *base, uint16x8_t value, mve_pred16_t p) @@ -980,9 +980,9 @@ void test_vstrbq_p_u16(uint8_t *base, 
uint16x8_t value, mve_pred16_t p) // CHECK-LABEL: @test_vstrbq_p_u32( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i8> -// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]]) -// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0(<4 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) +// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0(<4 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP2]]) // CHECK-NEXT: ret void // void test_vstrbq_p_u32(uint8_t *base, uint32x4_t value, mve_pred16_t p) @@ -1068,9 +1068,9 @@ void test_vstrhq_u32(uint16_t *base, uint32x4_t value) // CHECK-LABEL: @test_vstrhq_p_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]]) -// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0(<8 x half> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0(<8 x half> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]]) // CHECK-NEXT: ret void // void test_vstrhq_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p) @@ -1084,9 +1084,9 @@ void test_vstrhq_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p) // CHECK-LABEL: @test_vstrhq_p_s16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]]) -// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]]) // CHECK-NEXT: ret void // void test_vstrhq_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p) @@ -1101,9 +1101,9 @@ void test_vstrhq_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p) // CHECK-LABEL: @test_vstrhq_p_s32( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i16> -// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]]) -// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0(<4 x i16> [[TMP0]], ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) +// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0(<4 x i16> [[TMP0]], ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP2]]) // CHECK-NEXT: ret void // void test_vstrhq_p_s32(int16_t *base, int32x4_t value, mve_pred16_t p) @@ -1117,9 +1117,9 @@ void test_vstrhq_p_s32(int16_t *base, int32x4_t value, mve_pred16_t p) // CHECK-LABEL: @test_vstrhq_p_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]]) -// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> 
[[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]]) // CHECK-NEXT: ret void // void test_vstrhq_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p) @@ -1134,9 +1134,9 @@ void test_vstrhq_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p) // CHECK-LABEL: @test_vstrhq_p_u32( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i16> -// CHECK-NEXT: [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]]) -// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0(<4 x i16> [[TMP0]], ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP3]]) +// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) +// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0(<4 x i16> [[TMP0]], ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP2]]) // CHECK-NEXT: ret void // void test_vstrhq_p_u32(uint16_t *base, uint32x4_t value, mve_pred16_t p) @@ -1192,9 +1192,9 @@ void test_vstrwq_u32(uint32_t *base, uint32x4_t value) // CHECK-LABEL: @test_vstrwq_p_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) -// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]]) // CHECK-NEXT: ret void // void test_vstrwq_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p) @@ -1208,9 +1208,9 @@ void test_vstrwq_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p) // CHECK-LABEL: @test_vstrwq_p_s32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) -// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]]) // CHECK-NEXT: ret void // void test_vstrwq_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p) @@ -1224,9 +1224,9 @@ void test_vstrwq_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p) // CHECK-LABEL: @test_vstrwq_p_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32 -// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]]) -// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP2]]) +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]]) // CHECK-NEXT: ret void // void 
test_vstrwq_p_u32(uint32_t *base, uint32x4_t value, mve_pred16_t p) diff --git a/clang/test/CodeGen/attr-target-mv.c b/clang/test/CodeGen/attr-target-mv.c index 07f47d9..b8807dd 100644 --- a/clang/test/CodeGen/attr-target-mv.c +++ b/clang/test/CodeGen/attr-target-mv.c @@ -30,6 +30,8 @@ int __attribute__((target("arch=gracemont"))) foo(void) {return 24;} int __attribute__((target("arch=pantherlake"))) foo(void) {return 25;} int __attribute__((target("arch=clearwaterforest"))) foo(void) {return 26;} int __attribute__((target("arch=diamondrapids"))) foo(void) {return 27;} +int __attribute__((target("arch=wildcatlake"))) foo(void) {return 28;} +int __attribute__((target("arch=novalake"))) foo(void) {return 29;} int __attribute__((target("default"))) foo(void) { return 2; } int bar(void) { @@ -203,6 +205,10 @@ void calls_pr50025c(void) { pr50025c(); } // ITANIUM: ret i32 26 // ITANIUM: define{{.*}} i32 @foo.arch_diamondrapids() // ITANIUM: ret i32 27 +// ITANIUM: define{{.*}} i32 @foo.arch_wildcatlake() +// ITANIUM: ret i32 28 +// ITANIUM: define{{.*}} i32 @foo.arch_novalake() +// ITANIUM: ret i32 29 // ITANIUM: define{{.*}} i32 @foo() // ITANIUM: ret i32 2 // ITANIUM: define{{.*}} i32 @bar() @@ -262,6 +268,10 @@ void calls_pr50025c(void) { pr50025c(); } // WINDOWS: ret i32 26 // WINDOWS: define dso_local i32 @foo.arch_diamondrapids() // WINDOWS: ret i32 27 +// WINDOWS: define dso_local i32 @foo.arch_wildcatlake() +// WINDOWS: ret i32 28 +// WINDOWS: define dso_local i32 @foo.arch_novalake() +// WINDOWS: ret i32 29 // WINDOWS: define dso_local i32 @foo() // WINDOWS: ret i32 2 // WINDOWS: define dso_local i32 @bar() diff --git a/clang/test/CodeGen/distributed-thin-lto/supports-hot-cold-new.ll b/clang/test/CodeGen/distributed-thin-lto/supports-hot-cold-new.ll index 08c1a29..90cda3e 100644 --- a/clang/test/CodeGen/distributed-thin-lto/supports-hot-cold-new.ll +++ b/clang/test/CodeGen/distributed-thin-lto/supports-hot-cold-new.ll @@ -22,7 +22,7 @@ ; RUN: %clang -target x86_64-unknown-linux-gnu -O2 -o %t1.o -x ir %t.o -c -fthinlto-index=%t.o.thinlto.bc -save-temps=obj -; RUN: llvm-dis %t.s.3.import.bc -o - | FileCheck %s --check-prefix=CHECK-IR +; RUN: llvm-dis %t.s.4.opt.bc -o - | FileCheck %s --check-prefix=CHECK-IR ; CHECK-IR: !memprof {{.*}} !callsite ; CHECK-IR: "memprof"="cold" @@ -42,10 +42,15 @@ ; RUN: %clang -target x86_64-unknown-linux-gnu -O2 -o %t1.o -x ir %t.o -c -fthinlto-index=%t.o.thinlto.bc -save-temps=obj -; RUN: llvm-dis %t.s.3.import.bc -o - | FileCheck %s \ +; RUN: llvm-dis %t.s.4.opt.bc -o - | FileCheck %s \ ; RUN: --implicit-check-not "!memprof" --implicit-check-not "!callsite" \ ; RUN: --implicit-check-not "memprof"="cold" +;; Ensure the attributes and metadata are stripped when running a non-LTO pipeline. 
+; RUN: %clang -target x86_64-unknown-linux-gnu -O2 -x ir %t.o -S -emit-llvm -o - | FileCheck %s \ +; RUN: --implicit-check-not "!memprof" --implicit-check-not "!callsite" \ +; RUN: --implicit-check-not "memprof"="cold" + source_filename = "thinlto-distributed-supports-hot-cold-new.ll" target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/clang/test/CodeGen/target-builtin-noerror.c b/clang/test/CodeGen/target-builtin-noerror.c index 120f1a5..47d5ae5 100644 --- a/clang/test/CodeGen/target-builtin-noerror.c +++ b/clang/test/CodeGen/target-builtin-noerror.c @@ -178,6 +178,8 @@ void verifycpustrings(void) { (void)__builtin_cpu_is("lunarlake"); (void)__builtin_cpu_is("clearwaterforest"); (void)__builtin_cpu_is("pantherlake"); + (void)__builtin_cpu_is("wildcatlake"); + (void)__builtin_cpu_is("novalake"); (void)__builtin_cpu_is("haswell"); (void)__builtin_cpu_is("icelake-client"); (void)__builtin_cpu_is("icelake-server"); diff --git a/clang/test/CodeGen/unified-lto-module-flag.ll b/clang/test/CodeGen/unified-lto-module-flag.ll new file mode 100644 index 0000000..deefe82 --- /dev/null +++ b/clang/test/CodeGen/unified-lto-module-flag.ll @@ -0,0 +1,11 @@ +; Test that we do not duplicate the UnifiedLTO module flag. +; +; RUN: %clang_cc1 -emit-llvm -flto=full -funified-lto -o - %s | FileCheck %s + +; CHECK: !llvm.module.flags = !{!0, !1, !2, !3} +!llvm.module.flags = !{!0, !1, !2, !3} + +!0 = !{i32 1, !"wchar_size", i32 2} +!1 = !{i32 7, !"frame-pointer", i32 2} +!2 = !{i32 1, !"EnableSplitLTOUnit", i32 1} +!3 = !{i32 1, !"UnifiedLTO", i32 1} |