// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
// RUN:   -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg,sroa | \
// RUN:   FileCheck -check-prefixes=CHECK-A64 %s
// RUN: %clang_cc1 -triple armv8-none-linux-gnueabi -target-feature +neon \
// RUN:   -target-feature +fp16 -disable-O0-optnone -emit-llvm -o - %s | \
// RUN:   opt -S -passes=mem2reg,sroa | FileCheck -check-prefixes=CHECK-A32 %s
// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_neon.h>

// CHECK-A64-LABEL: define dso_local void @test_vst1_f16_x2(
// CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <4 x half>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <4 x half>] [[B_COERCE]], 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x half> [[B_COERCE_FCA_0_EXTRACT]] to <4 x i16>
// CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <4 x half>] [[B_COERCE]], 1
// CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x half> [[B_COERCE_FCA_1_EXTRACT]] to <4 x i16>
// CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8>
// CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[TMP1]] to <8 x i8>
// CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x half>
// CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP3]] to <4 x half>
// CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v4f16.p0(<4 x half> [[TMP4]], <4 x half> [[TMP5]], ptr [[A]])
// CHECK-A64-NEXT: ret void
//
// CHECK-A32-LABEL: define dso_local void @test_vst1_f16_x2(
// CHECK-A32-SAME: ptr noundef [[A:%.*]], [2 x i64] [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-A32-NEXT: [[ENTRY:.*:]]
// CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 0
// CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <4 x half>
// CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 1
// CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <4 x half>
// CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <4 x half> [[TMP0]] to <8 x i8>
// CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <4 x half> [[TMP1]] to <8 x i8>
// CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x i16>
// CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP3]] to <4 x i16>
// CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v4i16(ptr [[A]], <4 x i16> [[TMP4]], <4 x i16> [[TMP5]])
// CHECK-A32-NEXT: ret void
//
void test_vst1_f16_x2(float16_t *a, float16x4x2_t b) { vst1_f16_x2(a, b); }

// CHECK-A64-LABEL: define dso_local void @test_vst1_f16_x3(
// CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <4 x half>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <4 x half>] [[B_COERCE]], 0
// CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x half> [[B_COERCE_FCA_0_EXTRACT]] to <4 x i16>
// CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <4 x half>] [[B_COERCE]], 1
// CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x half> [[B_COERCE_FCA_1_EXTRACT]] to <4 x i16>
// CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <4 x half>] [[B_COERCE]], 2
// CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <4 x half> [[B_COERCE_FCA_2_EXTRACT]] to <4 x i16>
// CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8>
// CHECK-A64-NEXT: [[TMP4:%.*]]
= bitcast <4 x i16> [[TMP1]] to <8 x i8> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <4 x i16> [[TMP2]] to <8 x i8> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <4 x half> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x half> // CHECK-A64-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v4f16.p0(<4 x half> [[TMP6]], <4 x half> [[TMP7]], <4 x half> [[TMP8]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_f16_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [3 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <4 x half> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <4 x half> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <4 x half> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <4 x half> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <4 x half> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <4 x half> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <4 x i16> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v4i16(ptr [[A]], <4 x i16> [[TMP6]], <4 x i16> [[TMP7]], <4 x i16> [[TMP8]]) // CHECK-A32-NEXT: ret void // void test_vst1_f16_x3(float16_t *a, float16x4x3_t b) { vst1_f16_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_f16_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <4 x half>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <4 x half>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x half> [[B_COERCE_FCA_0_EXTRACT]] to <4 x i16> // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <4 x half>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x half> [[B_COERCE_FCA_1_EXTRACT]] to <4 x i16> // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <4 x half>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <4 x half> [[B_COERCE_FCA_2_EXTRACT]] to <4 x i16> // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <4 x half>] [[B_COERCE]], 3 // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <4 x half> [[B_COERCE_FCA_3_EXTRACT]] to <4 x i16> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <4 x i16> [[TMP1]] to <8 x i8> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <4 x i16> [[TMP2]] to <8 x i8> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <4 x i16> [[TMP3]] to <8 x i8> // CHECK-A64-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x half> // CHECK-A64-NEXT: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x half> // CHECK-A64-NEXT: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x half> // CHECK-A64-NEXT: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x half> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v4f16.p0(<4 x half> [[TMP8]], <4 x half> [[TMP9]], <4 
x half> [[TMP10]], <4 x half> [[TMP11]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_f16_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <4 x half> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <4 x half> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <4 x half> // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast i64 [[B_COERCE_FCA_3_EXTRACT]] to <4 x half> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <4 x half> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <4 x half> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <4 x half> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <4 x half> [[TMP3]] to <8 x i8> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> // CHECK-A32-NEXT: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16> // CHECK-A32-NEXT: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16> // CHECK-A32-NEXT: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v4i16(ptr [[A]], <4 x i16> [[TMP8]], <4 x i16> [[TMP9]], <4 x i16> [[TMP10]], <4 x i16> [[TMP11]]) // CHECK-A32-NEXT: ret void // void test_vst1_f16_x4(float16_t *a, float16x4x4_t b) { vst1_f16_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_f32_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <2 x float>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <2 x float>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[B_COERCE_FCA_0_EXTRACT]] to <2 x i32> // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <2 x float>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[B_COERCE_FCA_1_EXTRACT]] to <2 x i32> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x float> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v2f32.p0(<2 x float> [[TMP4]], <2 x float> [[TMP5]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_f32_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [2 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <2 x float> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <2 x float> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x float> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x float> [[TMP1]] to <8 x i8> // 
CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x float> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v2f32(ptr [[A]], <2 x float> [[TMP4]], <2 x float> [[TMP5]]) // CHECK-A32-NEXT: ret void // void test_vst1_f32_x2(float32_t *a, float32x2x2_t b) { vst1_f32_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_f32_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <2 x float>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <2 x float>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[B_COERCE_FCA_0_EXTRACT]] to <2 x i32> // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <2 x float>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[B_COERCE_FCA_1_EXTRACT]] to <2 x i32> // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <2 x float>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <2 x float> [[B_COERCE_FCA_2_EXTRACT]] to <2 x i32> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <2 x i32> [[TMP2]] to <8 x i8> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x float> // CHECK-A64-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <2 x float> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v2f32.p0(<2 x float> [[TMP6]], <2 x float> [[TMP7]], <2 x float> [[TMP8]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_f32_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [3 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <2 x float> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <2 x float> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <2 x float> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x float> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <2 x float> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <2 x float> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x float> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <2 x float> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v2f32(ptr [[A]], <2 x float> [[TMP6]], <2 x float> [[TMP7]], <2 x float> [[TMP8]]) // CHECK-A32-NEXT: ret void // void test_vst1_f32_x3(float32_t *a, float32x2x3_t b) { vst1_f32_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_f32_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <2 x float>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <2 x float>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[B_COERCE_FCA_0_EXTRACT]] to <2 
x i32> // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <2 x float>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[B_COERCE_FCA_1_EXTRACT]] to <2 x i32> // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <2 x float>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <2 x float> [[B_COERCE_FCA_2_EXTRACT]] to <2 x i32> // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <2 x float>] [[B_COERCE]], 3 // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <2 x float> [[B_COERCE_FCA_3_EXTRACT]] to <2 x i32> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <2 x i32> [[TMP2]] to <8 x i8> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <2 x i32> [[TMP3]] to <8 x i8> // CHECK-A64-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x float> // CHECK-A64-NEXT: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP5]] to <2 x float> // CHECK-A64-NEXT: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <2 x float> // CHECK-A64-NEXT: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <2 x float> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v2f32.p0(<2 x float> [[TMP8]], <2 x float> [[TMP9]], <2 x float> [[TMP10]], <2 x float> [[TMP11]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_f32_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <2 x float> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <2 x float> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <2 x float> // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast i64 [[B_COERCE_FCA_3_EXTRACT]] to <2 x float> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <2 x float> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <2 x float> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <2 x float> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <2 x float> [[TMP3]] to <8 x i8> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x float> // CHECK-A32-NEXT: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP5]] to <2 x float> // CHECK-A32-NEXT: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <2 x float> // CHECK-A32-NEXT: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <2 x float> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v2f32(ptr [[A]], <2 x float> [[TMP8]], <2 x float> [[TMP9]], <2 x float> [[TMP10]], <2 x float> [[TMP11]]) // CHECK-A32-NEXT: ret void // void test_vst1_f32_x4(float32_t *a, float32x2x4_t b) { vst1_f32_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_p16_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <4 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <4 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: 
[[TMP0:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v4i16.p0(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_p16_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [2 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x i16> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP3]] to <4 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v4i16(ptr [[A]], <4 x i16> [[TMP4]], <4 x i16> [[TMP5]]) // CHECK-A32-NEXT: ret void // void test_vst1_p16_x2(poly16_t *a, poly16x4x2_t b) { vst1_p16_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_p16_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <4 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <4 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <4 x i16>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x i16> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v4i16.p0(<4 x i16> [[TMP3]], <4 x i16> [[TMP4]], <4 x i16> [[TMP5]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_p16_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [3 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <4 x i16> [[TMP2]] 
to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <4 x i16> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v4i16(ptr [[A]], <4 x i16> [[TMP6]], <4 x i16> [[TMP7]], <4 x i16> [[TMP8]]) // CHECK-A32-NEXT: ret void // void test_vst1_p16_x3(poly16_t *a, poly16x4x3_t b) { vst1_p16_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_p16_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <4 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <4 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <4 x i16>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <4 x i16>] [[B_COERCE]], 3 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_3_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x i16> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP3]] to <4 x i16> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v4i16.p0(<4 x i16> [[TMP4]], <4 x i16> [[TMP5]], <4 x i16> [[TMP6]], <4 x i16> [[TMP7]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_p16_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast i64 [[B_COERCE_FCA_3_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <4 x i16> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <4 x i16> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <4 x i16> [[TMP3]] to <8 x i8> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> // CHECK-A32-NEXT: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16> // CHECK-A32-NEXT: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16> // CHECK-A32-NEXT: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v4i16(ptr [[A]], <4 x i16> [[TMP8]], <4 x i16> [[TMP9]], <4 x i16> [[TMP10]], <4 x i16> [[TMP11]]) // CHECK-A32-NEXT: ret void // void 
test_vst1_p16_x4(poly16_t *a, poly16x4x4_t b) { vst1_p16_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_p8_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <8 x i8>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <8 x i8>] [[B_COERCE]], 1 // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v8i8.p0(<8 x i8> [[B_COERCE_FCA_0_EXTRACT]], <8 x i8> [[B_COERCE_FCA_1_EXTRACT]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_p8_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [2 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v8i8(ptr [[A]], <8 x i8> [[TMP0]], <8 x i8> [[TMP1]]) // CHECK-A32-NEXT: ret void // void test_vst1_p8_x2(poly8_t *a, poly8x8x2_t b) { vst1_p8_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_p8_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <8 x i8>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <8 x i8>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <8 x i8>] [[B_COERCE]], 2 // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v8i8.p0(<8 x i8> [[B_COERCE_FCA_0_EXTRACT]], <8 x i8> [[B_COERCE_FCA_1_EXTRACT]], <8 x i8> [[B_COERCE_FCA_2_EXTRACT]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_p8_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [3 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v8i8(ptr [[A]], <8 x i8> [[TMP0]], <8 x i8> [[TMP1]], <8 x i8> [[TMP2]]) // CHECK-A32-NEXT: ret void // void test_vst1_p8_x3(poly8_t *a, poly8x8x3_t b) { vst1_p8_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_p8_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <8 x i8>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <8 x i8>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <8 x i8>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x 
<8 x i8>] [[B_COERCE]], 3 // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v8i8.p0(<8 x i8> [[B_COERCE_FCA_0_EXTRACT]], <8 x i8> [[B_COERCE_FCA_1_EXTRACT]], <8 x i8> [[B_COERCE_FCA_2_EXTRACT]], <8 x i8> [[B_COERCE_FCA_3_EXTRACT]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_p8_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast i64 [[B_COERCE_FCA_3_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v8i8(ptr [[A]], <8 x i8> [[TMP0]], <8 x i8> [[TMP1]], <8 x i8> [[TMP2]], <8 x i8> [[TMP3]]) // CHECK-A32-NEXT: ret void // void test_vst1_p8_x4(poly8_t *a, poly8x8x4_t b) { vst1_p8_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_s16_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <4 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <4 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v4i16.p0(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_s16_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [2 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x i16> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP3]] to <4 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v4i16(ptr [[A]], <4 x i16> [[TMP4]], <4 x i16> [[TMP5]]) // CHECK-A32-NEXT: ret void // void test_vst1_s16_x2(int16_t *a, int16x4x2_t b) { vst1_s16_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_s16_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = 
extractvalue [3 x <4 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <4 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <4 x i16>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x i16> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v4i16.p0(<4 x i16> [[TMP3]], <4 x i16> [[TMP4]], <4 x i16> [[TMP5]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_s16_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [3 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <4 x i16> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <4 x i16> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v4i16(ptr [[A]], <4 x i16> [[TMP6]], <4 x i16> [[TMP7]], <4 x i16> [[TMP8]]) // CHECK-A32-NEXT: ret void // void test_vst1_s16_x3(int16_t *a, int16x4x3_t b) { vst1_s16_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_s16_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <4 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <4 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <4 x i16>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <4 x i16>] [[B_COERCE]], 3 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_3_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x i16> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP3]] to <4 x i16> // CHECK-A64-NEXT: call void 
@llvm.aarch64.neon.st1x4.v4i16.p0(<4 x i16> [[TMP4]], <4 x i16> [[TMP5]], <4 x i16> [[TMP6]], <4 x i16> [[TMP7]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_s16_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast i64 [[B_COERCE_FCA_3_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <4 x i16> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <4 x i16> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <4 x i16> [[TMP3]] to <8 x i8> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> // CHECK-A32-NEXT: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16> // CHECK-A32-NEXT: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16> // CHECK-A32-NEXT: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v4i16(ptr [[A]], <4 x i16> [[TMP8]], <4 x i16> [[TMP9]], <4 x i16> [[TMP10]], <4 x i16> [[TMP11]]) // CHECK-A32-NEXT: ret void // void test_vst1_s16_x4(int16_t *a, int16x4x4_t b) { vst1_s16_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_s32_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <2 x i32>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <2 x i32>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v2i32.p0(<2 x i32> [[TMP2]], <2 x i32> [[TMP3]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_s32_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [2 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x i32> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> 
[[TMP3]] to <2 x i32> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v2i32(ptr [[A]], <2 x i32> [[TMP4]], <2 x i32> [[TMP5]]) // CHECK-A32-NEXT: ret void // void test_vst1_s32_x2(int32_t *a, int32x2x2_t b) { vst1_s32_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_s32_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <2 x i32>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <2 x i32>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <2 x i32>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x i32> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v2i32.p0(<2 x i32> [[TMP3]], <2 x i32> [[TMP4]], <2 x i32> [[TMP5]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_s32_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [3 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <2 x i32> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x i32> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x i32> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <2 x i32> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v2i32(ptr [[A]], <2 x i32> [[TMP6]], <2 x i32> [[TMP7]], <2 x i32> [[TMP8]]) // CHECK-A32-NEXT: ret void // void test_vst1_s32_x3(int32_t *a, int32x2x3_t b) { vst1_s32_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_s32_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <2 x i32>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <2 x i32>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <2 x i32>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <2 x i32>] [[B_COERCE]], 3 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: 
[[TMP2:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_3_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x i32> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x i32> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v2i32.p0(<2 x i32> [[TMP4]], <2 x i32> [[TMP5]], <2 x i32> [[TMP6]], <2 x i32> [[TMP7]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_s32_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast i64 [[B_COERCE_FCA_3_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <2 x i32> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <2 x i32> [[TMP3]] to <8 x i8> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x i32> // CHECK-A32-NEXT: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP5]] to <2 x i32> // CHECK-A32-NEXT: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <2 x i32> // CHECK-A32-NEXT: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <2 x i32> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v2i32(ptr [[A]], <2 x i32> [[TMP8]], <2 x i32> [[TMP9]], <2 x i32> [[TMP10]], <2 x i32> [[TMP11]]) // CHECK-A32-NEXT: ret void // void test_vst1_s32_x4(int32_t *a, int32x2x4_t b) { vst1_s32_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_s64_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <1 x i64>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <1 x i64>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_s64_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [2 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 
[[B_COERCE_FCA_0_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <1 x i64> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x i64> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP3]] to <1 x i64> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v1i64(ptr [[A]], <1 x i64> [[TMP4]], <1 x i64> [[TMP5]]) // CHECK-A32-NEXT: ret void // void test_vst1_s64_x2(int64_t *a, int64x1x2_t b) { vst1_s64_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_s64_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <1 x i64>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <1 x i64>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <1 x i64>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x i64> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v1i64.p0(<1 x i64> [[TMP3]], <1 x i64> [[TMP4]], <1 x i64> [[TMP5]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_s64_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [3 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <1 x i64> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <1 x i64> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <1 x i64> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x i64> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <1 x i64> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v1i64(ptr [[A]], <1 x i64> [[TMP6]], <1 x i64> [[TMP7]], <1 x i64> [[TMP8]]) // CHECK-A32-NEXT: ret void // void test_vst1_s64_x3(int64_t *a, int64x1x3_t b) { vst1_s64_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_s64_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <1 x i64>] [[B_COERCE]], 0 // 
CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <1 x i64>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <1 x i64>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <1 x i64>] [[B_COERCE]], 3 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_3_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x i64> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP3]] to <1 x i64> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v1i64.p0(<1 x i64> [[TMP4]], <1 x i64> [[TMP5]], <1 x i64> [[TMP6]], <1 x i64> [[TMP7]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_s64_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast i64 [[B_COERCE_FCA_3_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <1 x i64> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <1 x i64> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <1 x i64> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <1 x i64> [[TMP3]] to <8 x i8> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x i64> // CHECK-A32-NEXT: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP5]] to <1 x i64> // CHECK-A32-NEXT: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x i64> // CHECK-A32-NEXT: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <1 x i64> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v1i64(ptr [[A]], <1 x i64> [[TMP8]], <1 x i64> [[TMP9]], <1 x i64> [[TMP10]], <1 x i64> [[TMP11]]) // CHECK-A32-NEXT: ret void // void test_vst1_s64_x4(int64_t *a, int64x1x4_t b) { vst1_s64_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_s8_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <8 x i8>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <8 x i8>] [[B_COERCE]], 1 // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v8i8.p0(<8 x i8> [[B_COERCE_FCA_0_EXTRACT]], <8 x i8> [[B_COERCE_FCA_1_EXTRACT]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_s8_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [2 x i64] [[B_COERCE:%.*]]) 
#[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v8i8(ptr [[A]], <8 x i8> [[TMP0]], <8 x i8> [[TMP1]]) // CHECK-A32-NEXT: ret void // void test_vst1_s8_x2(int8_t *a, int8x8x2_t b) { vst1_s8_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_s8_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <8 x i8>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <8 x i8>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <8 x i8>] [[B_COERCE]], 2 // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v8i8.p0(<8 x i8> [[B_COERCE_FCA_0_EXTRACT]], <8 x i8> [[B_COERCE_FCA_1_EXTRACT]], <8 x i8> [[B_COERCE_FCA_2_EXTRACT]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_s8_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [3 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v8i8(ptr [[A]], <8 x i8> [[TMP0]], <8 x i8> [[TMP1]], <8 x i8> [[TMP2]]) // CHECK-A32-NEXT: ret void // void test_vst1_s8_x3(int8_t *a, int8x8x3_t b) { vst1_s8_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_s8_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <8 x i8>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <8 x i8>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <8 x i8>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <8 x i8>] [[B_COERCE]], 3 // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v8i8.p0(<8 x i8> [[B_COERCE_FCA_0_EXTRACT]], <8 x i8> [[B_COERCE_FCA_1_EXTRACT]], <8 x i8> [[B_COERCE_FCA_2_EXTRACT]], <8 x i8> [[B_COERCE_FCA_3_EXTRACT]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_s8_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 
[[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast i64 [[B_COERCE_FCA_3_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v8i8(ptr [[A]], <8 x i8> [[TMP0]], <8 x i8> [[TMP1]], <8 x i8> [[TMP2]], <8 x i8> [[TMP3]]) // CHECK-A32-NEXT: ret void // void test_vst1_s8_x4(int8_t *a, int8x8x4_t b) { vst1_s8_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_u16_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <4 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <4 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v4i16.p0(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_u16_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [2 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x i16> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP3]] to <4 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v4i16(ptr [[A]], <4 x i16> [[TMP4]], <4 x i16> [[TMP5]]) // CHECK-A32-NEXT: ret void // void test_vst1_u16_x2(uint16_t *a, uint16x4x2_t b) { vst1_u16_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_u16_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <4 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <4 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <4 x i16>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x i16> // CHECK-A64-NEXT: call void 
@llvm.aarch64.neon.st1x3.v4i16.p0(<4 x i16> [[TMP3]], <4 x i16> [[TMP4]], <4 x i16> [[TMP5]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_u16_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [3 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <4 x i16> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <4 x i16> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v4i16(ptr [[A]], <4 x i16> [[TMP6]], <4 x i16> [[TMP7]], <4 x i16> [[TMP8]]) // CHECK-A32-NEXT: ret void // void test_vst1_u16_x3(uint16_t *a, uint16x4x3_t b) { vst1_u16_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_u16_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <4 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <4 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <4 x i16>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <4 x i16>] [[B_COERCE]], 3 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[B_COERCE_FCA_3_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP2]] to <4 x i16> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP3]] to <4 x i16> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v4i16.p0(<4 x i16> [[TMP4]], <4 x i16> [[TMP5]], <4 x i16> [[TMP6]], <4 x i16> [[TMP7]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_u16_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] 
[[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast i64 [[B_COERCE_FCA_3_EXTRACT]] to <4 x i16> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <4 x i16> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <4 x i16> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <4 x i16> [[TMP3]] to <8 x i8> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP4]] to <4 x i16> // CHECK-A32-NEXT: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP5]] to <4 x i16> // CHECK-A32-NEXT: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <4 x i16> // CHECK-A32-NEXT: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <4 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v4i16(ptr [[A]], <4 x i16> [[TMP8]], <4 x i16> [[TMP9]], <4 x i16> [[TMP10]], <4 x i16> [[TMP11]]) // CHECK-A32-NEXT: ret void // void test_vst1_u16_x4(uint16_t *a, uint16x4x4_t b) { vst1_u16_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_u32_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <2 x i32>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <2 x i32>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v2i32.p0(<2 x i32> [[TMP2]], <2 x i32> [[TMP3]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_u32_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [2 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x i32> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x i32> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v2i32(ptr [[A]], <2 x i32> [[TMP4]], <2 x i32> [[TMP5]]) // CHECK-A32-NEXT: ret void // void test_vst1_u32_x2(uint32_t *a, uint32x2x2_t b) { vst1_u32_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_u32_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <2 x i32>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <2 x i32>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <2 x i32>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP0:%.*]] = 
bitcast <2 x i32> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x i32> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v2i32.p0(<2 x i32> [[TMP3]], <2 x i32> [[TMP4]], <2 x i32> [[TMP5]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_u32_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [3 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <2 x i32> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x i32> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x i32> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <2 x i32> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v2i32(ptr [[A]], <2 x i32> [[TMP6]], <2 x i32> [[TMP7]], <2 x i32> [[TMP8]]) // CHECK-A32-NEXT: ret void // void test_vst1_u32_x3(uint32_t *a, uint32x2x3_t b) { vst1_u32_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_u32_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <2 x i32>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <2 x i32>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <2 x i32>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <2 x i32>] [[B_COERCE]], 3 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[B_COERCE_FCA_3_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x i32> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x i32> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v2i32.p0(<2 x i32> [[TMP4]], <2 x i32> [[TMP5]], <2 x i32> [[TMP6]], <2 x i32> [[TMP7]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_u32_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] 
[[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast i64 [[B_COERCE_FCA_3_EXTRACT]] to <2 x i32> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <2 x i32> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <2 x i32> [[TMP3]] to <8 x i8> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP4]] to <2 x i32> // CHECK-A32-NEXT: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP5]] to <2 x i32> // CHECK-A32-NEXT: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <2 x i32> // CHECK-A32-NEXT: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <2 x i32> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v2i32(ptr [[A]], <2 x i32> [[TMP8]], <2 x i32> [[TMP9]], <2 x i32> [[TMP10]], <2 x i32> [[TMP11]]) // CHECK-A32-NEXT: ret void // void test_vst1_u32_x4(uint32_t *a, uint32x2x4_t b) { vst1_u32_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_u64_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <1 x i64>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <1 x i64>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_u64_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [2 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <1 x i64> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x i64> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP3]] to <1 x i64> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v1i64(ptr [[A]], <1 x i64> [[TMP4]], <1 x i64> [[TMP5]]) // CHECK-A32-NEXT: ret void // void test_vst1_u64_x2(uint64_t *a, uint64x1x2_t b) { vst1_u64_x2(a, b); } // CHECK-A64-LABEL: define 
dso_local void @test_vst1_u64_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <1 x i64>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <1 x i64>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <1 x i64>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x i64> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v1i64.p0(<1 x i64> [[TMP3]], <1 x i64> [[TMP4]], <1 x i64> [[TMP5]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_u64_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [3 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <1 x i64> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <1 x i64> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP3]] to <1 x i64> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x i64> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP5]] to <1 x i64> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v1i64(ptr [[A]], <1 x i64> [[TMP6]], <1 x i64> [[TMP7]], <1 x i64> [[TMP8]]) // CHECK-A32-NEXT: ret void // void test_vst1_u64_x3(uint64_t *a, uint64x1x3_t b) { vst1_u64_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_u64_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <1 x i64>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <1 x i64>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <1 x i64>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <1 x i64>] [[B_COERCE]], 3 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[B_COERCE_FCA_3_EXTRACT]] to <8 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> // CHECK-A64-NEXT: [[TMP5:%.*]] 
= bitcast <8 x i8> [[TMP1]] to <1 x i64> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x i64> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP3]] to <1 x i64> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v1i64.p0(<1 x i64> [[TMP4]], <1 x i64> [[TMP5]], <1 x i64> [[TMP6]], <1 x i64> [[TMP7]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_u64_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast i64 [[B_COERCE_FCA_3_EXTRACT]] to <1 x i64> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <1 x i64> [[TMP0]] to <8 x i8> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <1 x i64> [[TMP1]] to <8 x i8> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <1 x i64> [[TMP2]] to <8 x i8> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <1 x i64> [[TMP3]] to <8 x i8> // CHECK-A32-NEXT: [[TMP8:%.*]] = bitcast <8 x i8> [[TMP4]] to <1 x i64> // CHECK-A32-NEXT: [[TMP9:%.*]] = bitcast <8 x i8> [[TMP5]] to <1 x i64> // CHECK-A32-NEXT: [[TMP10:%.*]] = bitcast <8 x i8> [[TMP6]] to <1 x i64> // CHECK-A32-NEXT: [[TMP11:%.*]] = bitcast <8 x i8> [[TMP7]] to <1 x i64> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v1i64(ptr [[A]], <1 x i64> [[TMP8]], <1 x i64> [[TMP9]], <1 x i64> [[TMP10]], <1 x i64> [[TMP11]]) // CHECK-A32-NEXT: ret void // void test_vst1_u64_x4(uint64_t *a, uint64x1x4_t b) { vst1_u64_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_u8_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <8 x i8>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <8 x i8>] [[B_COERCE]], 1 // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v8i8.p0(<8 x i8> [[B_COERCE_FCA_0_EXTRACT]], <8 x i8> [[B_COERCE_FCA_1_EXTRACT]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_u8_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [2 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v8i8(ptr [[A]], <8 x i8> [[TMP0]], <8 x i8> [[TMP1]]) // CHECK-A32-NEXT: ret void // void test_vst1_u8_x2(uint8_t *a, uint8x8x2_t b) { vst1_u8_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_u8_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) 
#[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <8 x i8>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <8 x i8>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <8 x i8>] [[B_COERCE]], 2 // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v8i8.p0(<8 x i8> [[B_COERCE_FCA_0_EXTRACT]], <8 x i8> [[B_COERCE_FCA_1_EXTRACT]], <8 x i8> [[B_COERCE_FCA_2_EXTRACT]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_u8_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [3 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v8i8(ptr [[A]], <8 x i8> [[TMP0]], <8 x i8> [[TMP1]], <8 x i8> [[TMP2]]) // CHECK-A32-NEXT: ret void // void test_vst1_u8_x3(uint8_t *a, uint8x8x3_t b) { vst1_u8_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1_u8_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <8 x i8>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <8 x i8>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <8 x i8>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <8 x i8>] [[B_COERCE]], 3 // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v8i8.p0(<8 x i8> [[B_COERCE_FCA_0_EXTRACT]], <8 x i8> [[B_COERCE_FCA_1_EXTRACT]], <8 x i8> [[B_COERCE_FCA_2_EXTRACT]], <8 x i8> [[B_COERCE_FCA_3_EXTRACT]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1_u8_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast i64 [[B_COERCE_FCA_0_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast i64 [[B_COERCE_FCA_1_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_COERCE_FCA_2_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast i64 [[B_COERCE_FCA_3_EXTRACT]] to <8 x i8> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v8i8(ptr [[A]], <8 x i8> [[TMP0]], <8 x i8> [[TMP1]], <8 x i8> [[TMP2]], <8 x i8> [[TMP3]]) // CHECK-A32-NEXT: ret void // void test_vst1_u8_x4(uint8_t *a, uint8x8x4_t b) { vst1_u8_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_f16_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <8 x 
half>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <8 x half>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <8 x half> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i16> // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <8 x half>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <8 x half> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i16> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i16> [[TMP0]] to <16 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to <16 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x half> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP3]] to <8 x half> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v8f16.p0(<8 x half> [[TMP4]], <8 x half> [[TMP5]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_f16_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v8i16(ptr [[A]], <8 x i16> [[TMP2]], <8 x i16> [[TMP3]]) // CHECK-A32-NEXT: ret void // void test_vst1q_f16_x2(float16_t *a, float16x8x2_t b) { vst1q_f16_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_f16_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <8 x half>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <8 x half>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <8 x half> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i16> // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <8 x half>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <8 x half> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i16> // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <8 x half>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x half> [[B_COERCE_FCA_2_EXTRACT]] to <8 x i16> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP0]] to <16 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to <16 x i8> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP2]] to <16 x i8> // CHECK-A64-NEXT: 
[[TMP6:%.*]] = bitcast <16 x i8> [[TMP3]] to <8 x half> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x half> // CHECK-A64-NEXT: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v8f16.p0(<8 x half> [[TMP6]], <8 x half> [[TMP7]], <8 x half> [[TMP8]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_f16_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [6 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v8i16(ptr [[A]], <8 x i16> [[TMP3]], <8 x i16> [[TMP4]], <8 x i16> [[TMP5]]) // CHECK-A32-NEXT: ret void // void test_vst1q_f16_x3(float16_t *a, float16x8x3_t b) { vst1q_f16_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_f16_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <8 x half>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <8 x half>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <8 x half> [[B_COERCE_FCA_0_EXTRACT]] to <8 x i16> // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <8 x half>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <8 x half> [[B_COERCE_FCA_1_EXTRACT]] to <8 x i16> // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <8 x half>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x half> [[B_COERCE_FCA_2_EXTRACT]] to <8 x i16> // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <8 x half>] [[B_COERCE]], 3 // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x half> 
[[B_COERCE_FCA_3_EXTRACT]] to <8 x i16> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP0]] to <16 x i8> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to <16 x i8> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <8 x i16> [[TMP2]] to <16 x i8> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <8 x i16> [[TMP3]] to <16 x i8> // CHECK-A64-NEXT: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP4]] to <8 x half> // CHECK-A64-NEXT: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP5]] to <8 x half> // CHECK-A64-NEXT: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <8 x half> // CHECK-A64-NEXT: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <8 x half> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v8f16.p0(<8 x half> [[TMP8]], <8 x half> [[TMP9]], <8 x half> [[TMP10]], <8 x half> [[TMP11]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_f16_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [8 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_6_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 6 // CHECK-A32-NEXT: [[B_SROA_9_48_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_6_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_7_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 7 // CHECK-A32-NEXT: [[B_SROA_9_56_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_9_48_VEC_INSERT]], i64 [[B_COERCE_FCA_7_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[B_SROA_9_56_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x i16> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to <8 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v8i16(ptr [[A]], <8 x i16> [[TMP4]], <8 x i16> 
[[TMP5]], <8 x i16> [[TMP6]], <8 x i16> [[TMP7]]) // CHECK-A32-NEXT: ret void // void test_vst1q_f16_x4(float16_t *a, float16x8x4_t b) { vst1q_f16_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_f32_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <4 x float>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <4 x float>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[B_COERCE_FCA_0_EXTRACT]] to <4 x i32> // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <4 x float>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[B_COERCE_FCA_1_EXTRACT]] to <4 x i32> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x float> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x float> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v4f32.p0(<4 x float> [[TMP4]], <4 x float> [[TMP5]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_f32_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v4f32(ptr [[A]], <4 x float> [[TMP2]], <4 x float> [[TMP3]]) // CHECK-A32-NEXT: ret void // void test_vst1q_f32_x2(float32_t *a, float32x4x2_t b) { vst1q_f32_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_f32_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <4 x float>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <4 x float>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[B_COERCE_FCA_0_EXTRACT]] to <4 x i32> // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <4 x float>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[B_COERCE_FCA_1_EXTRACT]] to <4 x i32> // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <4 x float>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <4 x 
float> [[B_COERCE_FCA_2_EXTRACT]] to <4 x i32> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to <16 x i8> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x float> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x float> // CHECK-A64-NEXT: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x float> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v4f32.p0(<4 x float> [[TMP6]], <4 x float> [[TMP7]], <4 x float> [[TMP8]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_f32_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [6 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x float> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v4f32(ptr [[A]], <4 x float> [[TMP3]], <4 x float> [[TMP4]], <4 x float> [[TMP5]]) // CHECK-A32-NEXT: ret void // void test_vst1q_f32_x3(float32_t *a, float32x4x3_t b) { vst1q_f32_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_f32_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <4 x float>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <4 x float>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[B_COERCE_FCA_0_EXTRACT]] to <4 x i32> // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <4 x float>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[B_COERCE_FCA_1_EXTRACT]] to <4 x i32> // CHECK-A64-NEXT: 
[[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <4 x float>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <4 x float> [[B_COERCE_FCA_2_EXTRACT]] to <4 x i32> // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <4 x float>] [[B_COERCE]], 3 // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <4 x float> [[B_COERCE_FCA_3_EXTRACT]] to <4 x i32> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP2]] to <16 x i8> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8> // CHECK-A64-NEXT: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP4]] to <4 x float> // CHECK-A64-NEXT: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x float> // CHECK-A64-NEXT: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP6]] to <4 x float> // CHECK-A64-NEXT: [[TMP11:%.*]] = bitcast <16 x i8> [[TMP7]] to <4 x float> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v4f32.p0(<4 x float> [[TMP8]], <4 x float> [[TMP9]], <4 x float> [[TMP10]], <4 x float> [[TMP11]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_f32_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [8 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_6_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 6 // CHECK-A32-NEXT: [[B_SROA_9_48_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_6_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_7_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 7 // CHECK-A32-NEXT: [[B_SROA_9_56_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_9_48_VEC_INSERT]], i64 [[B_COERCE_FCA_7_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[B_SROA_9_56_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x 
float> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x float> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x float> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v4f32(ptr [[A]], <4 x float> [[TMP4]], <4 x float> [[TMP5]], <4 x float> [[TMP6]], <4 x float> [[TMP7]]) // CHECK-A32-NEXT: ret void // void test_vst1q_f32_x4(float32_t *a, float32x4x4_t b) { vst1q_f32_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_p16_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <8 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <8 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_p16_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v8i16(ptr [[A]], <8 x i16> [[TMP2]], <8 x i16> [[TMP3]]) // CHECK-A32-NEXT: ret void // void test_vst1q_p16_x2(poly16_t *a, poly16x8x2_t b) { vst1q_p16_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_p16_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <8 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <8 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <8 x i16>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> 
[[B_COERCE_FCA_0_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_2_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x i16> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v8i16.p0(<8 x i16> [[TMP3]], <8 x i16> [[TMP4]], <8 x i16> [[TMP5]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_p16_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [6 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v8i16(ptr [[A]], <8 x i16> [[TMP3]], <8 x i16> [[TMP4]], <8 x i16> [[TMP5]]) // CHECK-A32-NEXT: ret void // void test_vst1q_p16_x3(poly16_t *a, poly16x8x3_t b) { vst1q_p16_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_p16_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <8 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <8 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <8 x i16>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <8 x i16>] [[B_COERCE]], 3 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: 
[[TMP1:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_2_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_3_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x i16> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to <8 x i16> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v8i16.p0(<8 x i16> [[TMP4]], <8 x i16> [[TMP5]], <8 x i16> [[TMP6]], <8 x i16> [[TMP7]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_p16_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [8 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_6_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 6 // CHECK-A32-NEXT: [[B_SROA_9_48_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_6_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_7_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 7 // CHECK-A32-NEXT: [[B_SROA_9_56_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_9_48_VEC_INSERT]], i64 [[B_COERCE_FCA_7_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[B_SROA_9_56_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x i16> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to <8 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v8i16(ptr [[A]], <8 x i16> [[TMP4]], <8 x i16> [[TMP5]], <8 x i16> [[TMP6]], <8 x i16> [[TMP7]]) // CHECK-A32-NEXT: ret void // void 
test_vst1q_p16_x4(poly16_t *a, poly16x8x4_t b) { vst1q_p16_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_p8_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <16 x i8>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <16 x i8>] [[B_COERCE]], 1 // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v16i8.p0(<16 x i8> [[B_COERCE_FCA_0_EXTRACT]], <16 x i8> [[B_COERCE_FCA_1_EXTRACT]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_p8_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v16i8(ptr [[A]], <16 x i8> [[TMP0]], <16 x i8> [[TMP1]]) // CHECK-A32-NEXT: ret void // void test_vst1q_p8_x2(poly8_t *a, poly8x16x2_t b) { vst1q_p8_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_p8_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <16 x i8>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <16 x i8>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <16 x i8>] [[B_COERCE]], 2 // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v16i8.p0(<16 x i8> [[B_COERCE_FCA_0_EXTRACT]], <16 x i8> [[B_COERCE_FCA_1_EXTRACT]], <16 x i8> [[B_COERCE_FCA_2_EXTRACT]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_p8_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [6 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: 
[[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v16i8(ptr [[A]], <16 x i8> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]]) // CHECK-A32-NEXT: ret void // void test_vst1q_p8_x3(poly8_t *a, poly8x16x3_t b) { vst1q_p8_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_p8_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <16 x i8>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <16 x i8>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <16 x i8>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <16 x i8>] [[B_COERCE]], 3 // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v16i8.p0(<16 x i8> [[B_COERCE_FCA_0_EXTRACT]], <16 x i8> [[B_COERCE_FCA_1_EXTRACT]], <16 x i8> [[B_COERCE_FCA_2_EXTRACT]], <16 x i8> [[B_COERCE_FCA_3_EXTRACT]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_p8_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [8 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = 
insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_6_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 6 // CHECK-A32-NEXT: [[B_SROA_9_48_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_6_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_7_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 7 // CHECK-A32-NEXT: [[B_SROA_9_56_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_9_48_VEC_INSERT]], i64 [[B_COERCE_FCA_7_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[B_SROA_9_56_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v16i8(ptr [[A]], <16 x i8> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]]) // CHECK-A32-NEXT: ret void // void test_vst1q_p8_x4(poly8_t *a, poly8x16x4_t b) { vst1q_p8_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_s16_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <8 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <8 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_s16_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v8i16(ptr [[A]], <8 x i16> [[TMP2]], <8 x i16> [[TMP3]]) // CHECK-A32-NEXT: 
ret void // void test_vst1q_s16_x2(int16_t *a, int16x8x2_t b) { vst1q_s16_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_s16_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <8 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <8 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <8 x i16>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_2_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x i16> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v8i16.p0(<8 x i16> [[TMP3]], <8 x i16> [[TMP4]], <8 x i16> [[TMP5]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_s16_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [6 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v8i16(ptr [[A]], <8 x i16> [[TMP3]], <8 x i16> [[TMP4]], <8 x i16> [[TMP5]]) // CHECK-A32-NEXT: ret void // void test_vst1q_s16_x3(int16_t *a, int16x8x3_t b) { vst1q_s16_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_s16_x4( // 
CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <8 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <8 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <8 x i16>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <8 x i16>] [[B_COERCE]], 3 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_2_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_3_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x i16> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to <8 x i16> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v8i16.p0(<8 x i16> [[TMP4]], <8 x i16> [[TMP5]], <8 x i16> [[TMP6]], <8 x i16> [[TMP7]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_s16_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [8 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_6_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 6 // CHECK-A32-NEXT: [[B_SROA_9_48_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_6_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_7_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 7 // CHECK-A32-NEXT: [[B_SROA_9_56_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_9_48_VEC_INSERT]], i64 [[B_COERCE_FCA_7_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 
x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[B_SROA_9_56_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x i16> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to <8 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v8i16(ptr [[A]], <8 x i16> [[TMP4]], <8 x i16> [[TMP5]], <8 x i16> [[TMP6]], <8 x i16> [[TMP7]]) // CHECK-A32-NEXT: ret void // void test_vst1q_s16_x4(int16_t *a, int16x8x4_t b) { vst1q_s16_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_s32_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <4 x i32>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <4 x i32>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_s32_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v4i32(ptr [[A]], <4 x i32> [[TMP2]], <4 x i32> [[TMP3]]) // CHECK-A32-NEXT: ret void // void test_vst1q_s32_x2(int32_t *a, int32x4x2_t b) { vst1q_s32_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_s32_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <4 x i32>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <4 x i32>] 
[[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <4 x i32>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_2_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x i32> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v4i32.p0(<4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32> [[TMP5]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_s32_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [6 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x i32> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v4i32(ptr [[A]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32> [[TMP5]]) // CHECK-A32-NEXT: ret void // void test_vst1q_s32_x3(int32_t *a, int32x4x3_t b) { vst1q_s32_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_s32_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <4 x i32>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <4 x i32>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <4 x i32>] [[B_COERCE]], 2 // CHECK-A64-NEXT: 
[[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <4 x i32>] [[B_COERCE]], 3 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_2_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_3_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x i32> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x i32> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v4i32.p0(<4 x i32> [[TMP4]], <4 x i32> [[TMP5]], <4 x i32> [[TMP6]], <4 x i32> [[TMP7]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_s32_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [8 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_6_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 6 // CHECK-A32-NEXT: [[B_SROA_9_48_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_6_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_7_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 7 // CHECK-A32-NEXT: [[B_SROA_9_56_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_9_48_VEC_INSERT]], i64 [[B_COERCE_FCA_7_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[B_SROA_9_56_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x i32> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x i32> // 
CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v4i32(ptr [[A]], <4 x i32> [[TMP4]], <4 x i32> [[TMP5]], <4 x i32> [[TMP6]], <4 x i32> [[TMP7]]) // CHECK-A32-NEXT: ret void // void test_vst1q_s32_x4(int32_t *a, int32x4x4_t b) { vst1q_s32_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_s64_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <2 x i64>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <2 x i64>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_s64_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v2i64(ptr [[A]], <2 x i64> [[TMP2]], <2 x i64> [[TMP3]]) // CHECK-A32-NEXT: ret void // void test_vst1q_s64_x2(int64_t *a, int64x2x2_t b) { vst1q_s64_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_s64_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <2 x i64>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <2 x i64>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <2 x i64>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_2_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> 
[[TMP0]] to <2 x i64> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v2i64.p0(<2 x i64> [[TMP3]], <2 x i64> [[TMP4]], <2 x i64> [[TMP5]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_s64_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [6 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v2i64(ptr [[A]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], <2 x i64> [[TMP5]]) // CHECK-A32-NEXT: ret void // void test_vst1q_s64_x3(int64_t *a, int64x2x3_t b) { vst1q_s64_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_s64_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <2 x i64>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <2 x i64>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <2 x i64>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <2 x i64>] [[B_COERCE]], 3 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_2_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_3_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: 
[[TMP4:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x i64> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v2i64.p0(<2 x i64> [[TMP4]], <2 x i64> [[TMP5]], <2 x i64> [[TMP6]], <2 x i64> [[TMP7]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_s64_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [8 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_6_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 6 // CHECK-A32-NEXT: [[B_SROA_9_48_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_6_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_7_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 7 // CHECK-A32-NEXT: [[B_SROA_9_56_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_9_48_VEC_INSERT]], i64 [[B_COERCE_FCA_7_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[B_SROA_9_56_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> // CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64> // CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x i64> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v2i64(ptr [[A]], <2 x i64> [[TMP4]], <2 x i64> [[TMP5]], <2 x i64> [[TMP6]], <2 x i64> [[TMP7]]) // CHECK-A32-NEXT: ret void // void test_vst1q_s64_x4(int64_t *a, int64x2x4_t b) { vst1q_s64_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_s8_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: 
[[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <16 x i8>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <16 x i8>] [[B_COERCE]], 1 // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v16i8.p0(<16 x i8> [[B_COERCE_FCA_0_EXTRACT]], <16 x i8> [[B_COERCE_FCA_1_EXTRACT]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_s8_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v16i8(ptr [[A]], <16 x i8> [[TMP0]], <16 x i8> [[TMP1]]) // CHECK-A32-NEXT: ret void // void test_vst1q_s8_x2(int8_t *a, int8x16x2_t b) { vst1q_s8_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_s8_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <16 x i8>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <16 x i8>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <16 x i8>] [[B_COERCE]], 2 // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v16i8.p0(<16 x i8> [[B_COERCE_FCA_0_EXTRACT]], <16 x i8> [[B_COERCE_FCA_1_EXTRACT]], <16 x i8> [[B_COERCE_FCA_2_EXTRACT]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_s8_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [6 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 
[[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v16i8(ptr [[A]], <16 x i8> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]]) // CHECK-A32-NEXT: ret void // void test_vst1q_s8_x3(int8_t *a, int8x16x3_t b) { vst1q_s8_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_s8_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <16 x i8>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <16 x i8>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <16 x i8>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <16 x i8>] [[B_COERCE]], 3 // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v16i8.p0(<16 x i8> [[B_COERCE_FCA_0_EXTRACT]], <16 x i8> [[B_COERCE_FCA_1_EXTRACT]], <16 x i8> [[B_COERCE_FCA_2_EXTRACT]], <16 x i8> [[B_COERCE_FCA_3_EXTRACT]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_s8_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [8 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_6_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 6 // CHECK-A32-NEXT: [[B_SROA_9_48_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_6_EXTRACT]], i32 0 // 
CHECK-A32-NEXT: [[B_COERCE_FCA_7_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 7 // CHECK-A32-NEXT: [[B_SROA_9_56_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_9_48_VEC_INSERT]], i64 [[B_COERCE_FCA_7_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[B_SROA_9_56_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v16i8(ptr [[A]], <16 x i8> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]]) // CHECK-A32-NEXT: ret void // void test_vst1q_s8_x4(int8_t *a, int8x16x4_t b) { vst1q_s8_x4(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_u16_x2( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <8 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <8 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_u16_x2( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v8i16(ptr [[A]], <8 x i16> [[TMP2]], <8 x i16> [[TMP3]]) // CHECK-A32-NEXT: ret void // void test_vst1q_u16_x2(uint16_t *a, uint16x8x2_t b) { vst1q_u16_x2(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_u16_x3( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: 
[[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <8 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <8 x i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <8 x i16>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_2_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x i16> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v8i16.p0(<8 x i16> [[TMP3]], <8 x i16> [[TMP4]], <8 x i16> [[TMP5]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_u16_x3( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [6 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x i16> // CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v8i16(ptr [[A]], <8 x i16> [[TMP3]], <8 x i16> [[TMP4]], <8 x i16> [[TMP5]]) // CHECK-A32-NEXT: ret void // void test_vst1q_u16_x3(uint16_t *a, uint16x8x3_t b) { vst1q_u16_x3(a, b); } // CHECK-A64-LABEL: define dso_local void @test_vst1q_u16_x4( // CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A64-NEXT: [[ENTRY:.*:]] // CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <8 x i16>] [[B_COERCE]], 0 // CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <8 x 
i16>] [[B_COERCE]], 1 // CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <8 x i16>] [[B_COERCE]], 2 // CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <8 x i16>] [[B_COERCE]], 3 // CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_2_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[B_COERCE_FCA_3_EXTRACT]] to <16 x i8> // CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x i16> // CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to <8 x i16> // CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v8i16.p0(<8 x i16> [[TMP4]], <8 x i16> [[TMP5]], <8 x i16> [[TMP6]], <8 x i16> [[TMP7]], ptr [[A]]) // CHECK-A64-NEXT: ret void // // CHECK-A32-LABEL: define dso_local void @test_vst1q_u16_x4( // CHECK-A32-SAME: ptr noundef [[A:%.*]], [8 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] { // CHECK-A32-NEXT: [[ENTRY:.*:]] // CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 0 // CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 1 // CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 2 // CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 3 // CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 4 // CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 5 // CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[B_COERCE_FCA_6_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 6 // CHECK-A32-NEXT: [[B_SROA_9_48_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_6_EXTRACT]], i32 0 // CHECK-A32-NEXT: [[B_COERCE_FCA_7_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 7 // CHECK-A32-NEXT: [[B_SROA_9_56_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_9_48_VEC_INSERT]], i64 [[B_COERCE_FCA_7_EXTRACT]], i32 1 // CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[B_SROA_9_56_VEC_INSERT]] to <16 x i8> // CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> // CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> // CHECK-A32-NEXT: 
[[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to <8 x i16>
// CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to <8 x i16>
// CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v8i16(ptr [[A]], <8 x i16> [[TMP4]], <8 x i16> [[TMP5]], <8 x i16> [[TMP6]], <8 x i16> [[TMP7]])
// CHECK-A32-NEXT: ret void
//
void test_vst1q_u16_x4(uint16_t *a, uint16x8x4_t b) { vst1q_u16_x4(a, b); }

// CHECK-A64-LABEL: define dso_local void @test_vst1q_u32_x2(
// CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <4 x i32>] [[B_COERCE]], 0
// CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <4 x i32>] [[B_COERCE]], 1
// CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], ptr [[A]])
// CHECK-A64-NEXT: ret void
//
// CHECK-A32-LABEL: define dso_local void @test_vst1q_u32_x2(
// CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A32-NEXT: [[ENTRY:.*:]]
// CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0
// CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1
// CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2
// CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3
// CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v4i32(ptr [[A]], <4 x i32> [[TMP2]], <4 x i32> [[TMP3]])
// CHECK-A32-NEXT: ret void
//
void test_vst1q_u32_x2(uint32_t *a, uint32x4x2_t b) { vst1q_u32_x2(a, b); }

// CHECK-A64-LABEL: define dso_local void @test_vst1q_u32_x3(
// CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <4 x i32>] [[B_COERCE]], 0
// CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <4 x i32>] [[B_COERCE]], 1
// CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <4 x i32>] [[B_COERCE]], 2
// CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_2_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x i32>
// CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v4i32.p0(<4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32> [[TMP5]], ptr [[A]])
// CHECK-A64-NEXT: ret void
//
// CHECK-A32-LABEL: define dso_local void @test_vst1q_u32_x3(
// CHECK-A32-SAME: ptr noundef [[A:%.*]], [6 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A32-NEXT: [[ENTRY:.*:]]
// CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 0
// CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 1
// CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 2
// CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 3
// CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 4
// CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 5
// CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x i32>
// CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v4i32(ptr [[A]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32> [[TMP5]])
// CHECK-A32-NEXT: ret void
//
void test_vst1q_u32_x3(uint32_t *a, uint32x4x3_t b) { vst1q_u32_x3(a, b); }

// CHECK-A64-LABEL: define dso_local void @test_vst1q_u32_x4(
// CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <4 x i32>] [[B_COERCE]], 0
// CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <4 x i32>] [[B_COERCE]], 1
// CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <4 x i32>] [[B_COERCE]], 2
// CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <4 x i32>] [[B_COERCE]], 3
// CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_2_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[B_COERCE_FCA_3_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x i32>
// CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x i32>
// CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v4i32.p0(<4 x i32> [[TMP4]], <4 x i32> [[TMP5]], <4 x i32> [[TMP6]], <4 x i32> [[TMP7]], ptr [[A]])
// CHECK-A64-NEXT: ret void
//
// CHECK-A32-LABEL: define dso_local void @test_vst1q_u32_x4(
// CHECK-A32-SAME: ptr noundef [[A:%.*]], [8 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A32-NEXT: [[ENTRY:.*:]]
// CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 0
// CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 1
// CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 2
// CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 3
// CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 4
// CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 5
// CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_6_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 6
// CHECK-A32-NEXT: [[B_SROA_9_48_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_6_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_7_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 7
// CHECK-A32-NEXT: [[B_SROA_9_56_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_9_48_VEC_INSERT]], i64 [[B_COERCE_FCA_7_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[B_SROA_9_56_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x i32>
// CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x i32>
// CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v4i32(ptr [[A]], <4 x i32> [[TMP4]], <4 x i32> [[TMP5]], <4 x i32> [[TMP6]], <4 x i32> [[TMP7]])
// CHECK-A32-NEXT: ret void
//
void test_vst1q_u32_x4(uint32_t *a, uint32x4x4_t b) { vst1q_u32_x4(a, b); }

// CHECK-A64-LABEL: define dso_local void @test_vst1q_u64_x2(
// CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <2 x i64>] [[B_COERCE]], 0
// CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <2 x i64>] [[B_COERCE]], 1
// CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], ptr [[A]])
// CHECK-A64-NEXT: ret void
//
// CHECK-A32-LABEL: define dso_local void @test_vst1q_u64_x2(
// CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A32-NEXT: [[ENTRY:.*:]]
// CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0
// CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1
// CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2
// CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3
// CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v2i64(ptr [[A]], <2 x i64> [[TMP2]], <2 x i64> [[TMP3]])
// CHECK-A32-NEXT: ret void
//
void test_vst1q_u64_x2(uint64_t *a, uint64x2x2_t b) { vst1q_u64_x2(a, b); }

// CHECK-A64-LABEL: define dso_local void @test_vst1q_u64_x3(
// CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <2 x i64>] [[B_COERCE]], 0
// CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <2 x i64>] [[B_COERCE]], 1
// CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <2 x i64>] [[B_COERCE]], 2
// CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_2_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64>
// CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v2i64.p0(<2 x i64> [[TMP3]], <2 x i64> [[TMP4]], <2 x i64> [[TMP5]], ptr [[A]])
// CHECK-A64-NEXT: ret void
//
// CHECK-A32-LABEL: define dso_local void @test_vst1q_u64_x3(
// CHECK-A32-SAME: ptr noundef [[A:%.*]], [6 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A32-NEXT: [[ENTRY:.*:]]
// CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 0
// CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 1
// CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 2
// CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 3
// CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 4
// CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 5
// CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64>
// CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v2i64(ptr [[A]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], <2 x i64> [[TMP5]])
// CHECK-A32-NEXT: ret void
//
void test_vst1q_u64_x3(uint64_t *a, uint64x2x3_t b) { vst1q_u64_x3(a, b); }

// CHECK-A64-LABEL: define dso_local void @test_vst1q_u64_x4(
// CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <2 x i64>] [[B_COERCE]], 0
// CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <2 x i64>] [[B_COERCE]], 1
// CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <2 x i64>] [[B_COERCE]], 2
// CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <2 x i64>] [[B_COERCE]], 3
// CHECK-A64-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_0_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_1_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_2_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[B_COERCE_FCA_3_EXTRACT]] to <16 x i8>
// CHECK-A64-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK-A64-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-A64-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64>
// CHECK-A64-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x i64>
// CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v2i64.p0(<2 x i64> [[TMP4]], <2 x i64> [[TMP5]], <2 x i64> [[TMP6]], <2 x i64> [[TMP7]], ptr [[A]])
// CHECK-A64-NEXT: ret void
//
// CHECK-A32-LABEL: define dso_local void @test_vst1q_u64_x4(
// CHECK-A32-SAME: ptr noundef [[A:%.*]], [8 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A32-NEXT: [[ENTRY:.*:]]
// CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 0
// CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 1
// CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 2
// CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 3
// CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 4
// CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 5
// CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_6_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 6
// CHECK-A32-NEXT: [[B_SROA_9_48_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_6_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_7_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 7
// CHECK-A32-NEXT: [[B_SROA_9_56_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_9_48_VEC_INSERT]], i64 [[B_COERCE_FCA_7_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[B_SROA_9_56_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
// CHECK-A32-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-A32-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64>
// CHECK-A32-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x i64>
// CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v2i64(ptr [[A]], <2 x i64> [[TMP4]], <2 x i64> [[TMP5]], <2 x i64> [[TMP6]], <2 x i64> [[TMP7]])
// CHECK-A32-NEXT: ret void
//
void test_vst1q_u64_x4(uint64_t *a, uint64x2x4_t b) { vst1q_u64_x4(a, b); }

// CHECK-A64-LABEL: define dso_local void @test_vst1q_u8_x2(
// CHECK-A64-SAME: ptr noundef [[A:%.*]], [2 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x <16 x i8>] [[B_COERCE]], 0
// CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x <16 x i8>] [[B_COERCE]], 1
// CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x2.v16i8.p0(<16 x i8> [[B_COERCE_FCA_0_EXTRACT]], <16 x i8> [[B_COERCE_FCA_1_EXTRACT]], ptr [[A]])
// CHECK-A64-NEXT: ret void
//
// CHECK-A32-LABEL: define dso_local void @test_vst1q_u8_x2(
// CHECK-A32-SAME: ptr noundef [[A:%.*]], [4 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A32-NEXT: [[ENTRY:.*:]]
// CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 0
// CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 1
// CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 2
// CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i64] [[B_COERCE]], 3
// CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x2.p0.v16i8(ptr [[A]], <16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
// CHECK-A32-NEXT: ret void
//
void test_vst1q_u8_x2(uint8_t *a, uint8x16x2_t b) { vst1q_u8_x2(a, b); }

// CHECK-A64-LABEL: define dso_local void @test_vst1q_u8_x3(
// CHECK-A64-SAME: ptr noundef [[A:%.*]], [3 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [3 x <16 x i8>] [[B_COERCE]], 0
// CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [3 x <16 x i8>] [[B_COERCE]], 1
// CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [3 x <16 x i8>] [[B_COERCE]], 2
// CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x3.v16i8.p0(<16 x i8> [[B_COERCE_FCA_0_EXTRACT]], <16 x i8> [[B_COERCE_FCA_1_EXTRACT]], <16 x i8> [[B_COERCE_FCA_2_EXTRACT]], ptr [[A]])
// CHECK-A64-NEXT: ret void
//
// CHECK-A32-LABEL: define dso_local void @test_vst1q_u8_x3(
// CHECK-A32-SAME: ptr noundef [[A:%.*]], [6 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A32-NEXT: [[ENTRY:.*:]]
// CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 0
// CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 1
// CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 2
// CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 3
// CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 4
// CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [6 x i64] [[B_COERCE]], 5
// CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x3.p0.v16i8(ptr [[A]], <16 x i8> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]])
// CHECK-A32-NEXT: ret void
//
void test_vst1q_u8_x3(uint8_t *a, uint8x16x3_t b) { vst1q_u8_x3(a, b); }

// CHECK-A64-LABEL: define dso_local void @test_vst1q_u8_x4(
// CHECK-A64-SAME: ptr noundef [[A:%.*]], [4 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A64-NEXT: [[ENTRY:.*:]]
// CHECK-A64-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x <16 x i8>] [[B_COERCE]], 0
// CHECK-A64-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x <16 x i8>] [[B_COERCE]], 1
// CHECK-A64-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x <16 x i8>] [[B_COERCE]], 2
// CHECK-A64-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x <16 x i8>] [[B_COERCE]], 3
// CHECK-A64-NEXT: call void @llvm.aarch64.neon.st1x4.v16i8.p0(<16 x i8> [[B_COERCE_FCA_0_EXTRACT]], <16 x i8> [[B_COERCE_FCA_1_EXTRACT]], <16 x i8> [[B_COERCE_FCA_2_EXTRACT]], <16 x i8> [[B_COERCE_FCA_3_EXTRACT]], ptr [[A]])
// CHECK-A64-NEXT: ret void
//
// CHECK-A32-LABEL: define dso_local void @test_vst1q_u8_x4(
// CHECK-A32-SAME: ptr noundef [[A:%.*]], [8 x i64] [[B_COERCE:%.*]]) #[[ATTR0]] {
// CHECK-A32-NEXT: [[ENTRY:.*:]]
// CHECK-A32-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 0
// CHECK-A32-NEXT: [[B_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_0_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 1
// CHECK-A32-NEXT: [[B_SROA_0_8_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_0_0_VEC_INSERT]], i64 [[B_COERCE_FCA_1_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 2
// CHECK-A32-NEXT: [[B_SROA_3_16_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_2_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 3
// CHECK-A32-NEXT: [[B_SROA_3_24_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_3_16_VEC_INSERT]], i64 [[B_COERCE_FCA_3_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_4_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 4
// CHECK-A32-NEXT: [[B_SROA_6_32_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_4_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_5_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 5
// CHECK-A32-NEXT: [[B_SROA_6_40_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_6_32_VEC_INSERT]], i64 [[B_COERCE_FCA_5_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[B_COERCE_FCA_6_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 6
// CHECK-A32-NEXT: [[B_SROA_9_48_VEC_INSERT:%.*]] = insertelement <2 x i64> undef, i64 [[B_COERCE_FCA_6_EXTRACT]], i32 0
// CHECK-A32-NEXT: [[B_COERCE_FCA_7_EXTRACT:%.*]] = extractvalue [8 x i64] [[B_COERCE]], 7
// CHECK-A32-NEXT: [[B_SROA_9_56_VEC_INSERT:%.*]] = insertelement <2 x i64> [[B_SROA_9_48_VEC_INSERT]], i64 [[B_COERCE_FCA_7_EXTRACT]], i32 1
// CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[B_SROA_0_8_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[B_SROA_3_24_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[B_SROA_6_40_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[B_SROA_9_56_VEC_INSERT]] to <16 x i8>
// CHECK-A32-NEXT: call void @llvm.arm.neon.vst1x4.p0.v16i8(ptr [[A]], <16 x i8> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]])
// CHECK-A32-NEXT: ret void
//
void test_vst1q_u8_x4(uint8_t *a, uint8x16x4_t b) { vst1q_u8_x4(a, b); }