; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+complxnum,+sve,+fullfp16 -o - | FileCheck %s

target triple = "aarch64"

define <vscale x 4 x double> @simple_symmetric_muladd2(<vscale x 4 x double> %a, <vscale x 4 x double> %b) {
; CHECK-LABEL: simple_symmetric_muladd2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    adrp x8, .LCPI0_0
; CHECK-NEXT:    add x8, x8, :lo12:.LCPI0_0
; CHECK-NEXT:    ld1rd { z4.d }, p0/z, [x8]
; CHECK-NEXT:    fmad z0.d, p0/m, z4.d, z2.d
; CHECK-NEXT:    fmad z1.d, p0/m, z4.d, z3.d
; CHECK-NEXT:    ret
entry:
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %a)
  %ext00 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %ext01 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %fmul0 = fmul fast <vscale x 2 x double> %ext00, splat (double 3.200000e+00)
  %strided.vec44 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %b)
  %ext10 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec44, 0
  %ext11 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec44, 1
  %fadd0 = fadd fast <vscale x 2 x double> %ext10, %fmul0
  %fmul1 = fmul fast <vscale x 2 x double> %ext01, splat (double 3.200000e+00)
  %fadd1 = fadd fast <vscale x 2 x double> %ext11, %fmul1
  %interleaved.vec = tail call <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x double> %fadd0, <vscale x 2 x double> %fadd1)
  ret <vscale x 4 x double> %interleaved.vec
}

define <vscale x 4 x double> @simple_symmetric_unary2(<vscale x 4 x double> %a) {
; CHECK-LABEL: simple_symmetric_unary2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fneg z0.d, p0/m, z0.d
; CHECK-NEXT:    fneg z1.d, p0/m, z1.d
; CHECK-NEXT:    ret
entry:
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %a)
  %ext00 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %ext01 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %fneg0 = fneg fast <vscale x 2 x double> %ext00
  %fneg1 = fneg fast <vscale x 2 x double> %ext01
  %interleaved.vec = tail call <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x double> %fneg0, <vscale x 2 x double> %fneg1)
  ret <vscale x 4 x double> %interleaved.vec
}

define <vscale x 8 x double> @simple_symmetric_muladd4(<vscale x 8 x double> %a, <vscale x 8 x double> %b) {
; CHECK-LABEL: simple_symmetric_muladd4:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    adrp x8, .LCPI2_0
; CHECK-NEXT:    add x8, x8, :lo12:.LCPI2_0
; CHECK-NEXT:    ld1rd { z24.d }, p0/z, [x8]
; CHECK-NEXT:    fmad z0.d, p0/m, z24.d, z4.d
; CHECK-NEXT:    fmad z1.d, p0/m, z24.d, z5.d
; CHECK-NEXT:    fmad z2.d, p0/m, z24.d, z6.d
; CHECK-NEXT:    fmad z3.d, p0/m, z24.d, z7.d
; CHECK-NEXT:    ret
entry:
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.vector.deinterleave4.nxv8f64(<vscale x 8 x double> %a)
  %ext00 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %ext01 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %ext02 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 2
  %ext03 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 3
  %fmul0 = fmul fast <vscale x 2 x double> %ext00, splat (double 3.200000e+00)
  %strided.vec44 = tail call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.vector.deinterleave4.nxv8f64(<vscale x 8 x double> %b)
  %ext10 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec44, 0
  %ext11 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec44, 1
  %ext12 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec44, 2
  %ext13 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec44, 3
  %fadd0 = fadd fast <vscale x 2 x double> %ext10, %fmul0
  %fmul1 = fmul fast <vscale x 2 x double> %ext01, splat (double 3.200000e+00)
  %fadd1 = fadd fast <vscale x 2 x double> %ext11, %fmul1
  %fmul2 = fmul fast <vscale x 2 x double> %ext02, splat (double 3.200000e+00)
  %fadd2 = fadd fast <vscale x 2 x double> %ext12, %fmul2
  %fmul3 = fmul fast <vscale x 2 x double> %ext03, splat (double 3.200000e+00)
  %fadd3 = fadd fast <vscale x 2 x double> %ext13, %fmul3
  %interleaved.vec = tail call <vscale x 8 x double> @llvm.vector.interleave4.nxv8f64(<vscale x 2 x double> %fadd0, <vscale x 2 x double> %fadd1, <vscale x 2 x double> %fadd2, <vscale x 2 x double> %fadd3)
  ret <vscale x 8 x double> %interleaved.vec
}

define <vscale x 8 x double> @simple_symmetric_unary4(<vscale x 8 x double> %a) {
; CHECK-LABEL: simple_symmetric_unary4:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fneg z0.d, p0/m, z0.d
; CHECK-NEXT:    fneg z1.d, p0/m, z1.d
; CHECK-NEXT:    fneg z2.d, p0/m, z2.d
; CHECK-NEXT:    fneg z3.d, p0/m, z3.d
; CHECK-NEXT:    ret
entry:
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.vector.deinterleave4.nxv8f64(<vscale x 8 x double> %a)
  %ext00 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %ext01 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %ext02 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 2
  %ext03 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 3
  %fneg0 = fneg fast <vscale x 2 x double> %ext00
  %fneg1 = fneg fast <vscale x 2 x double> %ext01
  %fneg2 = fneg fast <vscale x 2 x double> %ext02
  %fneg3 = fneg fast <vscale x 2 x double> %ext03
  %interleaved.vec = tail call <vscale x 8 x double> @llvm.vector.interleave4.nxv8f64(<vscale x 2 x double> %fneg0, <vscale x 2 x double> %fneg1, <vscale x 2 x double> %fneg2, <vscale x 2 x double> %fneg3)
  ret <vscale x 8 x double> %interleaved.vec
}