; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp,+fp64 -mve-max-interleave-factor=4 -verify-machineinstrs %s -o - | FileCheck %s
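; Each function below loads an interleaved block from %src, pulls out the
; four stride-4 lanes with shufflevector, sums them, stores the result to
; %dst, and returns a pointer advanced past the whole block. For 8-, 16- and
; 32-bit elements the interleaved-access lowering should turn this into the
; vld40-vld43 instruction group, folding the pointer increment into the
; writeback form of the final vld43. The pattern is comparable to a use of
; the ACLE vld4q intrinsics (e.g. vld4q_s32 from arm_mve.h), but written as
; plain shuffles so that the generic shuffle matching is what gets exercised.
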
; i32

define ptr @vld4_v4i32(ptr %src, ptr %dst) {
; CHECK-LABEL: vld4_v4i32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vld40.32 {q0, q1, q2, q3}, [r0]
; CHECK-NEXT:    vld41.32 {q0, q1, q2, q3}, [r0]
; CHECK-NEXT:    vld42.32 {q0, q1, q2, q3}, [r0]
; CHECK-NEXT:    vld43.32 {q0, q1, q2, q3}, [r0]!
; CHECK-NEXT:    vadd.i32 q2, q2, q3
; CHECK-NEXT:    vadd.i32 q0, q0, q1
; CHECK-NEXT:    vadd.i32 q0, q0, q2
; CHECK-NEXT:    vstrw.32 q0, [r1]
; CHECK-NEXT:    bx lr
entry:
  %l1 = load <16 x i32>, ptr %src, align 4
  %s1 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %s2 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
  %s3 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
  %s4 = shufflevector <16 x i32> %l1, <16 x i32> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
  %a1 = add <4 x i32> %s1, %s2
  %a2 = add <4 x i32> %s3, %s4
  %a3 = add <4 x i32> %a1, %a2
  store <4 x i32> %a3, ptr %dst
  %ret = getelementptr inbounds <16 x i32>, ptr %src, i32 1
  ret ptr %ret
}

; i16

define ptr @vld4_v8i16(ptr %src, ptr %dst) {
; CHECK-LABEL: vld4_v8i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vld40.16 {q0, q1, q2, q3}, [r0]
; CHECK-NEXT:    vld41.16 {q0, q1, q2, q3}, [r0]
; CHECK-NEXT:    vld42.16 {q0, q1, q2, q3}, [r0]
; CHECK-NEXT:    vld43.16 {q0, q1, q2, q3}, [r0]!
; CHECK-NEXT:    vadd.i16 q2, q2, q3
; CHECK-NEXT:    vadd.i16 q0, q0, q1
; CHECK-NEXT:    vadd.i16 q0, q0, q2
; CHECK-NEXT:    vstrw.32 q0, [r1]
; CHECK-NEXT:    bx lr
entry:
  %l1 = load <32 x i16>, ptr %src, align 4
  %s1 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
  %s2 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
  %s3 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
  %s4 = shufflevector <32 x i16> %l1, <32 x i16> undef, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
  %a1 = add <8 x i16> %s1, %s2
  %a2 = add <8 x i16> %s3, %s4
  %a3 = add <8 x i16> %a1, %a2
  store <8 x i16> %a3, ptr %dst
  %ret = getelementptr inbounds <32 x i16>, ptr %src, i32 1
  ret ptr %ret
}

; i8

define ptr @vld4_v16i8(ptr %src, ptr %dst) {
; CHECK-LABEL: vld4_v16i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vld40.8 {q0, q1, q2, q3}, [r0]
; CHECK-NEXT:    vld41.8 {q0, q1, q2, q3}, [r0]
; CHECK-NEXT:    vld42.8 {q0, q1, q2, q3}, [r0]
; CHECK-NEXT:    vld43.8 {q0, q1, q2, q3}, [r0]!
; CHECK-NEXT:    vadd.i8 q2, q2, q3
; CHECK-NEXT:    vadd.i8 q0, q0, q1
; CHECK-NEXT:    vadd.i8 q0, q0, q2
; CHECK-NEXT:    vstrw.32 q0, [r1]
; CHECK-NEXT:    bx lr
entry:
  %l1 = load <64 x i8>, ptr %src, align 4
  %s1 = shufflevector <64 x i8> %l1, <64 x i8> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
  %s2 = shufflevector <64 x i8> %l1, <64 x i8> undef, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
  %s3 = shufflevector <64 x i8> %l1, <64 x i8> undef, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
  %s4 = shufflevector <64 x i8> %l1, <64 x i8> undef, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63>
  %a1 = add <16 x i8> %s1, %s2
  %a2 = add <16 x i8> %s3, %s4
  %a3 = add <16 x i8> %a1, %a2
  store <16 x i8> %a3, ptr %dst
  %ret = getelementptr inbounds <64 x i8>, ptr %src, i32 1
  ret ptr %ret
}

; i64

define ptr @vld4_v2i64(ptr %src, ptr %dst) {
; CHECK-LABEL: vld4_v2i64:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
; CHECK-NEXT:    vldrw.u32 q2, [r0, #48]
; CHECK-NEXT:    vldrw.u32 q4, [r0, #32]
; CHECK-NEXT:    vmov.f32 s4, s2
; CHECK-NEXT:    vmov.f32 s5, s3
; CHECK-NEXT:    vmov.f32 s2, s8
; CHECK-NEXT:    vmov.f32 s3, s9
; CHECK-NEXT:    vmov lr, r12, d5
; CHECK-NEXT:    vldrw.u32 q2, [r0], #64
; CHECK-NEXT:    vmov r4, r8, d9
; CHECK-NEXT:    vmov.f32 s12, s10
; CHECK-NEXT:    vmov.f32 s13, s11
; CHECK-NEXT:    vmov r2, r7, d1
; CHECK-NEXT:    vmov.f32 s2, s16
; CHECK-NEXT:    vmov.f32 s3, s17
; CHECK-NEXT:    vmov r3, r6, d1
; CHECK-NEXT:    adds.w r2, r2, lr
; CHECK-NEXT:    adc.w r7, r7, r12
; CHECK-NEXT:    adds r3, r3, r4
; CHECK-NEXT:    vmov r4, r5, d2
; CHECK-NEXT:    adc.w r6, r6, r8
; CHECK-NEXT:    adds.w r12, r3, r2
; CHECK-NEXT:    vmov r3, r2, d0
; CHECK-NEXT:    adc.w lr, r6, r7
; CHECK-NEXT:    adds r3, r3, r4
; CHECK-NEXT:    vmov r6, r4, d6
; CHECK-NEXT:    adcs r2, r5
; CHECK-NEXT:    vmov r5, r7, d4
; CHECK-NEXT:    adds r5, r5, r6
; CHECK-NEXT:    adcs r4, r7
; CHECK-NEXT:    adds r3, r3, r5
; CHECK-NEXT:    adcs r2, r4
; CHECK-NEXT:    vmov q0[2], q0[0], r3, r12
; CHECK-NEXT:    vmov q0[3], q0[1], r2, lr
; CHECK-NEXT:    vstrw.32 q0, [r1]
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
entry:
  %l1 = load <8 x i64>, ptr %src, align 4
  %s1 = shufflevector <8 x i64> %l1, <8 x i64> undef, <2 x i32> <i32 0, i32 4>
  %s2 = shufflevector <8 x i64> %l1, <8 x i64> undef, <2 x i32> <i32 1, i32 5>
  %s3 = shufflevector <8 x i64> %l1, <8 x i64> undef, <2 x i32> <i32 2, i32 6>
  %s4 = shufflevector <8 x i64> %l1, <8 x i64> undef, <2 x i32> <i32 3, i32 7>
  %a1 = add <2 x i64> %s1, %s2
  %a2 = add <2 x i64> %s3, %s4
  %a3 = add <2 x i64> %a1, %a2
  store <2 x i64> %a3, ptr %dst
  %ret = getelementptr inbounds <8 x i64>, ptr %src, i32 1
  ret ptr %ret
}
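; MVE structured loads only support 8-, 16- and 32-bit elements, so the
; v2i64 case above cannot use vld4x: the deinterleave is done with plain
; vldrw loads and vmov lane extractions, and the 64-bit additions are
; carried out in GPR pairs with adds/adc chains before the result is
; rebuilt into q0.
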
; f32

define ptr @vld4_v4f32(ptr %src, ptr %dst) {
; CHECK-LABEL: vld4_v4f32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vld40.32 {q0, q1, q2, q3}, [r0]
; CHECK-NEXT:    vld41.32 {q0, q1, q2, q3}, [r0]
; CHECK-NEXT:    vld42.32 {q0, q1, q2, q3}, [r0]
; CHECK-NEXT:    vld43.32 {q0, q1, q2, q3}, [r0]!
; CHECK-NEXT:    vadd.f32 q2, q2, q3
; CHECK-NEXT:    vadd.f32 q0, q0, q1
; CHECK-NEXT:    vadd.f32 q0, q0, q2
; CHECK-NEXT:    vstrw.32 q0, [r1]
; CHECK-NEXT:    bx lr
entry:
  %l1 = load <16 x float>, ptr %src, align 4
  %s1 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %s2 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
  %s3 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
  %s4 = shufflevector <16 x float> %l1, <16 x float> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
  %a1 = fadd <4 x float> %s1, %s2
  %a2 = fadd <4 x float> %s3, %s4
  %a3 = fadd <4 x float> %a1, %a2
  store <4 x float> %a3, ptr %dst
  %ret = getelementptr inbounds <16 x float>, ptr %src, i32 1
  ret ptr %ret
}

; f16

define ptr @vld4_v8f16(ptr %src, ptr %dst) {
; CHECK-LABEL: vld4_v8f16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vld40.16 {q0, q1, q2, q3}, [r0]
; CHECK-NEXT:    vld41.16 {q0, q1, q2, q3}, [r0]
; CHECK-NEXT:    vld42.16 {q0, q1, q2, q3}, [r0]
; CHECK-NEXT:    vld43.16 {q0, q1, q2, q3}, [r0]!
; CHECK-NEXT:    vadd.f16 q2, q2, q3
; CHECK-NEXT:    vadd.f16 q0, q0, q1
; CHECK-NEXT:    vadd.f16 q0, q0, q2
; CHECK-NEXT:    vstrw.32 q0, [r1]
; CHECK-NEXT:    bx lr
entry:
  %l1 = load <32 x half>, ptr %src, align 4
  %s1 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
  %s2 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
  %s3 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
  %s4 = shufflevector <32 x half> %l1, <32 x half> undef, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
  %a1 = fadd <8 x half> %s1, %s2
  %a2 = fadd <8 x half> %s3, %s4
  %a3 = fadd <8 x half> %a1, %a2
  store <8 x half> %a3, ptr %dst
  %ret = getelementptr inbounds <32 x half>, ptr %src, i32 1
  ret ptr %ret
}

; f64

define ptr @vld4_v2f64(ptr %src, ptr %dst) {
; CHECK-LABEL: vld4_v2f64:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vldrw.u32 q0, [r0, #48]
; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
; CHECK-NEXT:    vadd.f64 d0, d0, d1
; CHECK-NEXT:    vadd.f64 d1, d2, d3
; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
; CHECK-NEXT:    vldrw.u32 q2, [r0], #64
; CHECK-NEXT:    vadd.f64 d2, d2, d3
; CHECK-NEXT:    vadd.f64 d3, d4, d5
; CHECK-NEXT:    vadd.f64 d1, d1, d0
; CHECK-NEXT:    vadd.f64 d0, d3, d2
; CHECK-NEXT:    vstrw.32 q0, [r1]
; CHECK-NEXT:    bx lr
entry:
  %l1 = load <8 x double>, ptr %src, align 4
  %s1 = shufflevector <8 x double> %l1, <8 x double> undef, <2 x i32> <i32 0, i32 4>
  %s2 = shufflevector <8 x double> %l1, <8 x double> undef, <2 x i32> <i32 1, i32 5>
  %s3 = shufflevector <8 x double> %l1, <8 x double> undef, <2 x i32> <i32 2, i32 6>
  %s4 = shufflevector <8 x double> %l1, <8 x double> undef, <2 x i32> <i32 3, i32 7>
  %a1 = fadd <2 x double> %s1, %s2
  %a2 = fadd <2 x double> %s3, %s4
  %a3 = fadd <2 x double> %a1, %a2
  store <2 x double> %a3, ptr %dst
  %ret = getelementptr inbounds <8 x double>, ptr %src, i32 1
  ret ptr %ret
}
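
; As with v2i64, the v2f64 case gets no vld4x form: each pair of adjacent
; doubles is loaded with vldrw and summed with vadd.f64 on the d-register
; halves, and the 64-byte advance of %src folds into the post-incremented
; vldrw.u32 q2, [r0], #64.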