Diffstat (limited to 'llvm/test/CodeGen/AArch64')
76 files changed, 6735 insertions, 3939 deletions
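Note (not part of the diff itself): the updated CHECK lines below replace `nuw G_PTR_ADD` with `nuw nusw inbounds G_PTR_ADD` (or `nuw inbounds G_PTR_ADD`) and `implicit-def $xN` with `implicit-def renamable $xN`, i.e. the tests now expect the IRTranslator and friends to propagate the GEP's no-wrap/inbounds information onto the generated G_PTR_ADD. As a minimal sketch of the kind of input these tests translate (the function and value names here are illustrative, not taken from any test in the diff):

; Hypothetical example: a struct-field GEP of the sort exercised by the
; irtranslator tests below. With this change, its translation is expected to
; look roughly like:
;   %off:_(s64) = G_CONSTANT i64 4
;   %addr:_(p0) = nuw nusw inbounds G_PTR_ADD %base, %off(s64)
define i32 @sample(ptr %base) {
  %gep = getelementptr inbounds { i32, i32 }, ptr %base, i64 0, i32 1
  %val = load i32, ptr %gep
  ret i32 %val
}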
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll index f0d9aa4..639b6fd 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-gep.ll @@ -20,8 +20,8 @@ define i32 @cse_gep(ptr %ptr, i32 %idx) { ; O0-NEXT: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]] ; O0-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL1]](s64) ; O0-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 - ; O0-NEXT: %11:_(p0) = nuw nusw G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) - ; O0-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD %11(p0) :: (load (s32) from %ir.gep2) + ; O0-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[PTR_ADD1]], [[C1]](s64) + ; O0-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.gep2) ; O0-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[LOAD1]] ; O0-NEXT: $w0 = COPY [[ADD]](s32) ; O0-NEXT: RET_ReallyLR implicit $w0 @@ -39,8 +39,8 @@ define i32 @cse_gep(ptr %ptr, i32 %idx) { ; O3-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0) ; O3-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load (s32) from %ir.gep1) ; O3-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 - ; O3-NEXT: %9:_(p0) = nuw nusw G_PTR_ADD [[PTR_ADD]], [[C1]](s64) - ; O3-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD %9(p0) :: (load (s32) from %ir.gep2) + ; O3-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[PTR_ADD]], [[C1]](s64) + ; O3-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from %ir.gep2) ; O3-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[LOAD1]] ; O3-NEXT: $w0 = COPY [[ADD]](s32) ; O3-NEXT: RET_ReallyLR implicit $w0 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll index 3b12885..79b2e2e 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator-switch.ll @@ -795,8 +795,8 @@ define void @jt_multiple_jump_tables(ptr %arg, i32 %arg1, ptr %arg2) { ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[PHI]], [[C111]] ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[MUL]](s64) ; CHECK-NEXT: [[C112:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: %120:_(p0) = nuw nusw G_PTR_ADD [[PTR_ADD]], [[C112]](s64) - ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD %120(p0) :: (load (p0) from %ir.tmp59) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[PTR_ADD]], [[C112]](s64) + ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[PTR_ADD1]](p0) :: (load (p0) from %ir.tmp59) ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp ; CHECK-NEXT: $x0 = COPY [[COPY]](p0) ; CHECK-NEXT: $x1 = COPY [[LOAD]](p0) diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll index 5115368..675c953 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll @@ -599,10 +599,10 @@ define ptr @test_constant_null() { ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK: [[VAL1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4) ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 -; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ADDR]], [[CST1]](s64) +; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], 
[[CST1]](s64) ; CHECK: [[VAL2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load (s32) from %ir.addr + 4) ; CHECK: G_STORE [[VAL1]](s8), [[ADDR]](p0) :: (store (s8) into %ir.addr, align 4) -; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ADDR]], [[CST1]](s64) +; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST1]](s64) ; CHECK: G_STORE [[VAL2]](s32), [[GEP2]](p0) :: (store (s32) into %ir.addr + 4) define void @test_struct_memops(ptr %addr) { %val = load { i8, i32 }, ptr %addr @@ -706,7 +706,7 @@ define float @test_frem(float %arg1, float %arg2) { ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SADDO [[LHS]], [[RHS]] ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.addr) ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 -; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ADDR]], [[CST]](s64) +; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST]](s64) ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4) declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) define void @test_sadd_overflow(i32 %lhs, i32 %rhs, ptr %addr) { @@ -722,7 +722,7 @@ define void @test_sadd_overflow(i32 %lhs, i32 %rhs, ptr %addr) { ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_UADDO [[LHS]], [[RHS]] ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.addr) ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 -; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ADDR]], [[CST]](s64) +; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST]](s64) ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4) declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) define void @test_uadd_overflow(i32 %lhs, i32 %rhs, ptr %addr) { @@ -738,7 +738,7 @@ define void @test_uadd_overflow(i32 %lhs, i32 %rhs, ptr %addr) { ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SSUBO [[LHS]], [[RHS]] ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.subr) ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 -; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ADDR]], [[CST]](s64) +; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST]](s64) ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.subr + 4, align 4) declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) define void @test_ssub_overflow(i32 %lhs, i32 %rhs, ptr %subr) { @@ -754,7 +754,7 @@ define void @test_ssub_overflow(i32 %lhs, i32 %rhs, ptr %subr) { ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_USUBO [[LHS]], [[RHS]] ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.subr) ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 -; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ADDR]], [[CST]](s64) +; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST]](s64) ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.subr + 4, align 4) declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) define void @test_usub_overflow(i32 %lhs, i32 %rhs, ptr %subr) { @@ -770,7 +770,7 @@ define void @test_usub_overflow(i32 %lhs, i32 %rhs, ptr %subr) { ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SMULO [[LHS]], [[RHS]] ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.addr) ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 -; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ADDR]], [[CST]](s64) +; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw 
inbounds G_PTR_ADD [[ADDR]], [[CST]](s64) ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4) declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32) define void @test_smul_overflow(i32 %lhs, i32 %rhs, ptr %addr) { @@ -786,7 +786,7 @@ define void @test_smul_overflow(i32 %lhs, i32 %rhs, ptr %addr) { ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_UMULO [[LHS]], [[RHS]] ; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.addr) ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 -; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ADDR]], [[CST]](s64) +; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST]](s64) ; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4) declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32) define void @test_umul_overflow(i32 %lhs, i32 %rhs, ptr %addr) { @@ -799,13 +799,13 @@ define void @test_umul_overflow(i32 %lhs, i32 %rhs, ptr %addr) { ; CHECK: %0:_(p0) = COPY $x0 ; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4) ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 -; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST1]](s64) +; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST1]](s64) ; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load (s8) from %ir.addr + 4, align 4) ; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 -; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST2]](s64) +; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST2]](s64) ; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.addr + 8) ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 -; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST3]](s64) +; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST3]](s64) ; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 12) ; CHECK: $w0 = COPY [[LD3]](s32) %struct.nested = type {i8, { i8, i32 }, i32} @@ -820,16 +820,16 @@ define i32 @test_extractvalue(ptr %addr) { ; CHECK: %1:_(p0) = COPY $x1 ; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4) ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 -; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST1]](s64) +; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST1]](s64) ; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load (s8) from %ir.addr + 4, align 4) ; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 -; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST2]](s64) +; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST2]](s64) ; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.addr + 8) ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 -; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST3]](s64) +; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST3]](s64) ; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 12) ; CHECK: G_STORE [[LD2]](s8), %1(p0) :: (store (s8) into %ir.addr2, align 4) -; CHECK: [[GEP4:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %1, [[CST1]](s64) +; CHECK: [[GEP4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %1, [[CST1]](s64) ; CHECK: G_STORE [[LD3]](s32), [[GEP4]](p0) :: (store (s32) into %ir.addr2 + 4) define void @test_extractvalue_agg(ptr %addr, ptr %addr2) { %struct = load %struct.nested, ptr %addr @@ -854,20 +854,20 @@ define void 
@test_trivial_extract_ptr([1 x ptr] %s, i8 %val) { ; CHECK: %1:_(s32) = COPY $w1 ; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4) ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 -; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST1]](s64) +; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST1]](s64) ; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load (s8) from %ir.addr + 4, align 4) ; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 -; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST2]](s64) +; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST2]](s64) ; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.addr + 8) ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 -; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST3]](s64) +; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST3]](s64) ; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 12) ; CHECK: G_STORE [[LD1]](s8), %0(p0) :: (store (s8) into %ir.addr, align 4) -; CHECK: [[GEP4:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST1]](s64) +; CHECK: [[GEP4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST1]](s64) ; CHECK: G_STORE [[LD2]](s8), [[GEP4]](p0) :: (store (s8) into %ir.addr + 4, align 4) -; CHECK: [[GEP5:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST2]](s64) +; CHECK: [[GEP5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST2]](s64) ; CHECK: G_STORE %1(s32), [[GEP5]](p0) :: (store (s32) into %ir.addr + 8) -; CHECK: [[GEP6:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST3]](s64) +; CHECK: [[GEP6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST3]](s64) ; CHECK: G_STORE [[LD4]](s32), [[GEP6]](p0) :: (store (s32) into %ir.addr + 12) define void @test_insertvalue(ptr %addr, i32 %val) { %struct = load %struct.nested, ptr %addr @@ -899,23 +899,23 @@ define [1 x ptr] @test_trivial_insert_ptr([1 x ptr] %s, ptr %val) { ; CHECK: %1:_(p0) = COPY $x1 ; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %1(p0) :: (load (s8) from %ir.addr2, align 4) ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 -; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %1, [[CST1]](s64) +; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %1, [[CST1]](s64) ; CHECK: [[LD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load (s32) from %ir.addr2 + 4) ; CHECK: [[LD3:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4) -; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST1]](s64) +; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST1]](s64) ; CHECK: [[LD4:%[0-9]+]]:_(s8) = G_LOAD [[GEP2]](p0) :: (load (s8) from %ir.addr + 4, align 4) ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 -; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST3]](s64) +; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST3]](s64) ; CHECK: [[LD5:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 8) ; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12 -; CHECK: [[GEP4:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST4]](s64) +; CHECK: [[GEP4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST4]](s64) ; CHECK: [[LD6:%[0-9]+]]:_(s32) = G_LOAD [[GEP4]](p0) :: (load (s32) from %ir.addr + 12) ; CHECK: G_STORE [[LD3]](s8), %0(p0) :: (store (s8) into %ir.addr, align 4) -; CHECK: [[GEP5:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST1]](s64) +; CHECK: [[GEP5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST1]](s64) ; CHECK: G_STORE [[LD1]](s8), [[GEP5]](p0) :: (store (s8) into %ir.addr + 4, align 4) -; 
CHECK: [[GEP6:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST3]](s64) +; CHECK: [[GEP6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST3]](s64) ; CHECK: G_STORE [[LD2]](s32), [[GEP6]](p0) :: (store (s32) into %ir.addr + 8) -; CHECK: [[GEP7:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST4]](s64) +; CHECK: [[GEP7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST4]](s64) ; CHECK: G_STORE [[LD6]](s32), [[GEP7]](p0) :: (store (s32) into %ir.addr + 12) define void @test_insertvalue_agg(ptr %addr, ptr %addr2) { %smallstruct = load {i8, i32}, ptr %addr2 @@ -1905,19 +1905,19 @@ define void @test_phi_diamond(ptr %a.ptr, ptr %b.ptr, i1 %selector, ptr %dst) { ; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD [[ARG1]](p0) :: (load (s8) from %ir.a.ptr, align 4) ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 -; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ARG1]], [[CST1]](s64) +; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ARG1]], [[CST1]](s64) ; CHECK: [[LD2:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p0) :: (load (s16) from %ir.a.ptr + 2) ; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 -; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ARG1]], [[CST2]](s64) +; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ARG1]], [[CST2]](s64) ; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.a.ptr + 4) ; CHECK: G_BR %bb.4 ; CHECK: [[LD4:%[0-9]+]]:_(s8) = G_LOAD [[ARG2]](p0) :: (load (s8) from %ir.b.ptr, align 4) ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 -; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ARG2]], [[CST3]](s64) +; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ARG2]], [[CST3]](s64) ; CHECK: [[LD5:%[0-9]+]]:_(s16) = G_LOAD [[GEP3]](p0) :: (load (s16) from %ir.b.ptr + 2) ; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 -; CHECK: [[GEP4:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ARG2]], [[CST4]](s64) +; CHECK: [[GEP4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ARG2]], [[CST4]](s64) ; CHECK: [[LD6:%[0-9]+]]:_(s32) = G_LOAD [[GEP4]](p0) :: (load (s32) from %ir.b.ptr + 4) ; CHECK: [[PN1:%[0-9]+]]:_(s8) = G_PHI [[LD1]](s8), %bb.2, [[LD4]](s8), %bb.3 @@ -1925,10 +1925,10 @@ define void @test_phi_diamond(ptr %a.ptr, ptr %b.ptr, i1 %selector, ptr %dst) { ; CHECK: [[PN3:%[0-9]+]]:_(s32) = G_PHI [[LD3]](s32), %bb.2, [[LD6]](s32), %bb.3 ; CHECK: G_STORE [[PN1]](s8), [[ARG4]](p0) :: (store (s8) into %ir.dst, align 4) ; CHECK: [[CST5:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 -; CHECK: [[GEP5:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ARG4]], [[CST5]](s64) +; CHECK: [[GEP5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ARG4]], [[CST5]](s64) ; CHECK: G_STORE [[PN2]](s16), [[GEP5]](p0) :: (store (s16) into %ir.dst + 2) ; CHECK: [[CST6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 -; CHECK: [[GEP6:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ARG4]], [[CST6]](s64) +; CHECK: [[GEP6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ARG4]], [[CST6]](s64) ; CHECK: G_STORE [[PN3]](s32), [[GEP6]](p0) :: (store (s32) into %ir.dst + 4) ; CHECK: RET_ReallyLR @@ -1964,22 +1964,22 @@ define void @test_nested_aggregate_const(ptr %ptr) { ; CHECK: [[CST6:%[0-9]+]]:_(s32) = G_CONSTANT i32 13 ; CHECK: G_STORE [[CST1]](s32), [[BASE]](p0) :: (store (s32) into %ir.ptr, align 8) ; CHECK: [[CST7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 -; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[BASE]], [[CST7]](s64) +; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[BASE]], [[CST7]](s64) ; CHECK: G_STORE [[CST1]](s32), [[GEP1]](p0) :: (store (s32) into %ir.ptr + 4) ; CHECK: [[CST8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 -; CHECK: 
[[GEP2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[BASE]], [[CST8]](s64) +; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[BASE]], [[CST8]](s64) ; CHECK: G_STORE [[CST2]](s16), [[GEP2]](p0) :: (store (s16) into %ir.ptr + 8, align 8) ; CHECK: [[CST9:%[0-9]+]]:_(s64) = G_CONSTANT i64 10 -; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[BASE]], [[CST9]](s64) +; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[BASE]], [[CST9]](s64) ; CHECK: G_STORE [[CST3]](s8), [[GEP3]](p0) :: (store (s8) into %ir.ptr + 10, align 2) ; CHECK: [[CST10:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 -; CHECK: [[GEP4:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[BASE]], [[CST10]](s64) +; CHECK: [[GEP4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[BASE]], [[CST10]](s64) ; CHECK: G_STORE [[CST4]](s64), [[GEP4]](p0) :: (store (s64) into %ir.ptr + 16) ; CHECK: [[CST11:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 -; CHECK: [[GEP5:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[BASE]], [[CST11]](s64) +; CHECK: [[GEP5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[BASE]], [[CST11]](s64) ; CHECK: G_STORE [[CST5]](s64), [[GEP5]](p0) :: (store (s64) into %ir.ptr + 24) ; CHECK: [[CST12:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 -; CHECK: [[GEP6:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[BASE]], [[CST12]](s64) +; CHECK: [[GEP6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[BASE]], [[CST12]](s64) ; CHECK: G_STORE [[CST6]](s32), [[GEP6]](p0) :: (store (s32) into %ir.ptr + 32, align 8) store %agg.nested { i32 1, i32 1, %agg.inner { i16 2, i8 3, %agg.inner.inner {i64 5, i64 8} }, i32 13}, ptr %ptr ret void @@ -2519,7 +2519,7 @@ define {i8, i32} @test_freeze_struct(ptr %addr) { ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]] + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]] ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s8) = G_FREEZE [[LOAD]] ; CHECK-NEXT: [[FREEZE1:%[0-9]+]]:_(s32) = G_FREEZE [[LOAD1]] diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll index 2779e89..4a85d84 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-pcsections.ll @@ -12,7 +12,7 @@ define i32 @val_compare_and_swap(ptr %p, i32 %cmp, i32 %new) { ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK-NEXT: liveins: $w1, $w2, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p) + ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p) ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w1, 0, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -46,13 +46,13 @@ define i32 @val_compare_and_swap_from_load(ptr %p, i32 %cmp, ptr %pnew) { ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: liveins: $w1, $x0, $x2 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w9 = LDRWui killed renamable $x2, 0, implicit-def $x9, pcsections !0 :: (load (s32) from %ir.pnew) + ; CHECK-NEXT: renamable $w9 = LDRWui killed renamable $x2, 0, implicit-def renamable $x9, pcsections !0 :: (load (s32) from %ir.pnew) ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1.cmpxchg.start: ; CHECK-NEXT: 
successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0, $x9 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p) + ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p) ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w1, 0, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -91,7 +91,7 @@ define i32 @val_compare_and_swap_rel(ptr %p, i32 %cmp, i32 %new) { ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000) ; CHECK-NEXT: liveins: $w1, $w2, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p) + ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p) ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w1, 0, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: Bcc 1, %bb.3, implicit killed $nzcv, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -243,7 +243,7 @@ define i32 @fetch_and_nand(ptr %p) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p) + ; CHECK-NEXT: renamable $w8 = LDXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p) ; CHECK-NEXT: renamable $w9 = ANDWri renamable $w8, 2, pcsections !0 ; CHECK-NEXT: $w9 = ORNWrs $wzr, killed renamable $w9, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STLXRW killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s32) into %ir.p) @@ -295,7 +295,7 @@ define i32 @fetch_and_or(ptr %p) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w9, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s32) from %ir.p) + ; CHECK-NEXT: renamable $w8 = LDAXRW renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s32) from %ir.p) ; CHECK-NEXT: $w10 = ORRWrs renamable $w8, renamable $w9, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w11 = STLXRW killed renamable $w10, renamable $x0, pcsections !0 :: (volatile store (s32) into %ir.p) ; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0 @@ -726,7 +726,7 @@ define i8 @atomicrmw_add_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: $w9 = ADDWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STLXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -750,7 +750,7 @@ define i8 @atomicrmw_xchg_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, 
implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: early-clobber renamable $w9 = STXRB renamable $w1, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -773,7 +773,7 @@ define i8 @atomicrmw_sub_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: $w9 = SUBWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -797,7 +797,7 @@ define i8 @atomicrmw_and_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: $w9 = ANDWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STLXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -821,7 +821,7 @@ define i8 @atomicrmw_or_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: $w9 = ORRWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STLXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -845,7 +845,7 @@ define i8 @atomicrmw_xor_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: $w9 = EORWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STXRB killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s8) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -869,7 +869,7 @@ define i8 @atomicrmw_min_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, 
pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 7, pcsections !0 ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 32, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 11, implicit killed $nzcv, pcsections !0 @@ -895,7 +895,7 @@ define i8 @atomicrmw_max_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 7, pcsections !0 ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 32, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 12, implicit killed $nzcv, pcsections !0 @@ -923,10 +923,10 @@ define i8 @atomicrmw_umin_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w9, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: renamable $w8 = ANDWri renamable $w8, 7, implicit killed $x8 ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w9, 0, implicit-def $nzcv, pcsections !0 - ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 3, implicit killed $nzcv, implicit-def $x10, pcsections !0 + ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 3, implicit killed $nzcv, implicit-def renamable $x10, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w11 = STLXRB renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s8) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -951,10 +951,10 @@ define i8 @atomicrmw_umax_i8(ptr %ptr, i8 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w9, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRB renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: renamable $w8 = ANDWri renamable $w8, 7, implicit killed $x8 ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w9, 0, implicit-def $nzcv, pcsections !0 - ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 8, implicit killed $nzcv, implicit-def $x10, pcsections !0 + ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 8, implicit killed $nzcv, implicit-def renamable $x10, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w11 = STXRB renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s8) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -977,7 +977,7 @@ define i16 @atomicrmw_add_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: 
%bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: $w9 = ADDWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STLXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -1001,7 +1001,7 @@ define i16 @atomicrmw_xchg_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: early-clobber renamable $w9 = STXRH renamable $w1, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w9, %bb.1, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -1024,7 +1024,7 @@ define i16 @atomicrmw_sub_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: $w9 = SUBWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -1048,7 +1048,7 @@ define i16 @atomicrmw_and_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: $w9 = ANDWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STLXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -1072,7 +1072,7 @@ define i16 @atomicrmw_or_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: $w9 = ORRWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STLXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -1096,7 +1096,7 @@ define i16 @atomicrmw_xor_i16(ptr %ptr, i16 %rhs) { ; 
CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: $w9 = EORWrs renamable $w8, renamable $w1, 0, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w10 = STXRH killed renamable $w9, renamable $x0, pcsections !0 :: (volatile store (s16) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w10, %bb.1, pcsections !0 @@ -1120,7 +1120,7 @@ define i16 @atomicrmw_min_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 15, pcsections !0 ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 40, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 11, implicit killed $nzcv, pcsections !0 @@ -1146,7 +1146,7 @@ define i16 @atomicrmw_max_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w1, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: renamable $w9 = SBFMWri renamable $w8, 0, 15, pcsections !0 ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 40, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: renamable $w9 = CSELWr renamable $w8, renamable $w1, 12, implicit killed $nzcv, pcsections !0 @@ -1174,10 +1174,10 @@ define i16 @atomicrmw_umin_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w9, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDAXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: renamable $w8 = ANDWri renamable $w8, 15, implicit killed $x8 ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w9, 0, implicit-def $nzcv, pcsections !0 - ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 3, implicit killed $nzcv, implicit-def $x10, pcsections !0 + ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 3, implicit killed $nzcv, implicit-def renamable $x10, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w11 = STLXRH renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s16) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -1202,10 +1202,10 @@ define i16 @atomicrmw_umax_i16(ptr %ptr, i16 %rhs) { ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000) ; CHECK-NEXT: liveins: $w9, $x0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def $x8, pcsections !0 :: (volatile load 
(s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w8 = LDXRH renamable $x0, implicit-def renamable $x8, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: renamable $w8 = ANDWri renamable $w8, 15, implicit killed $x8 ; CHECK-NEXT: $wzr = SUBSWrs renamable $w8, renamable $w9, 0, implicit-def $nzcv, pcsections !0 - ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 8, implicit killed $nzcv, implicit-def $x10, pcsections !0 + ; CHECK-NEXT: renamable $w10 = CSELWr renamable $w8, renamable $w9, 8, implicit killed $nzcv, implicit-def renamable $x10, pcsections !0 ; CHECK-NEXT: early-clobber renamable $w11 = STXRH renamable $w10, renamable $x0, implicit killed $x10, pcsections !0 :: (volatile store (s16) into %ir.ptr) ; CHECK-NEXT: CBNZW killed renamable $w11, %bb.1, pcsections !0 ; CHECK-NEXT: {{ $}} @@ -1230,7 +1230,7 @@ define { i8, i1 } @cmpxchg_i8(ptr %ptr, i8 %desired, i8 %new) { ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.4(0x04000000) ; CHECK-NEXT: liveins: $w1, $w2, $x8 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w0 = LDXRB renamable $x8, implicit-def $x0, pcsections !0 :: (volatile load (s8) from %ir.ptr) + ; CHECK-NEXT: renamable $w0 = LDXRB renamable $x8, implicit-def renamable $x0, pcsections !0 :: (volatile load (s8) from %ir.ptr) ; CHECK-NEXT: renamable $w9 = ANDWri renamable $w0, 7, pcsections !0 ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 0, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: Bcc 1, %bb.4, implicit killed $nzcv, pcsections !0 @@ -1272,7 +1272,7 @@ define { i16, i1 } @cmpxchg_i16(ptr %ptr, i16 %desired, i16 %new) { ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.4(0x04000000) ; CHECK-NEXT: liveins: $w1, $w2, $x8 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: renamable $w0 = LDXRH renamable $x8, implicit-def $x0, pcsections !0 :: (volatile load (s16) from %ir.ptr) + ; CHECK-NEXT: renamable $w0 = LDXRH renamable $x8, implicit-def renamable $x0, pcsections !0 :: (volatile load (s16) from %ir.ptr) ; CHECK-NEXT: renamable $w9 = ANDWri renamable $w0, 15, pcsections !0 ; CHECK-NEXT: dead $wzr = SUBSWrx killed renamable $w9, renamable $w1, 8, implicit-def $nzcv, pcsections !0 ; CHECK-NEXT: Bcc 1, %bb.4, implicit killed $nzcv, pcsections !0 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-sret-demotion.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-sret-demotion.ll index a82d7fd..08021cc 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-sret-demotion.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-sret-demotion.ll @@ -11,28 +11,28 @@ define [9 x i64] @callee_sret_demotion() { ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 ; CHECK-NEXT: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64)) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD]](p0) :: (store (s64)) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD1]](p0) :: (store (s64)) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD 
[[COPY]], [[C3]](s64) ; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD2]](p0) :: (store (s64)) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64) ; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD3]](p0) :: (store (s64)) ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 40 - ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64) ; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD4]](p0) :: (store (s64)) ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 - ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C6]](s64) + ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64) ; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD5]](p0) :: (store (s64)) ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 56 - ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C7]](s64) + ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64) ; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD6]](p0) :: (store (s64)) ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 - ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C8]](s64) + ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64) ; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD7]](p0) :: (store (s64)) ; CHECK-NEXT: RET_ReallyLR ret [9 x i64] zeroinitializer @@ -48,28 +48,28 @@ define i64 @caller() { ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64) ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C2]](s64) ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C3]](s64) ; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD3]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 40 - ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C4]](s64) ; CHECK-NEXT: [[LOAD5:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD4]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 
48 - ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C5]](s64) ; CHECK-NEXT: [[LOAD6:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD5]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 56 - ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C6]](s64) + ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C6]](s64) ; CHECK-NEXT: [[LOAD7:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 - ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C7]](s64) + ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C7]](s64) ; CHECK-NEXT: [[LOAD8:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD7]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: $x0 = COPY [[LOAD4]](s64) ; CHECK-NEXT: RET_ReallyLR implicit $x0 @@ -88,28 +88,28 @@ define i64 @caller_tail() { ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C1]](s64) ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C2]](s64) ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C3]](s64) ; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD3]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 40 - ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C4]](s64) ; CHECK-NEXT: [[LOAD5:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD4]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 - ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C5]](s64) ; CHECK-NEXT: [[LOAD6:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD5]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 56 - ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C6]](s64) + ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C6]](s64) ; CHECK-NEXT: [[LOAD7:%[0-9]+]]:_(s64) = G_LOAD 
[[PTR_ADD6]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 - ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C7]](s64) + ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C7]](s64) ; CHECK-NEXT: [[LOAD8:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD7]](p0) :: (load (s64) from %stack.0) ; CHECK-NEXT: $x0 = COPY [[LOAD4]](s64) ; CHECK-NEXT: RET_ReallyLR implicit $x0 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll index 8992a95..39860a7 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-cse.ll @@ -4,7 +4,7 @@ ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK: [[LO:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.ptr) ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 -; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ADDR]], [[CST]](s64) +; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST]](s64) ; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p0) :: (load (s64) from %ir.ptr + 8) ; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll index 8704331..b3e436b 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll @@ -61,7 +61,7 @@ define void @take_128bit_struct(ptr %ptr, [2 x i64] %in) { ; CHECK-LABEL: name: test_split_struct ; CHECK: [[LD1:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.ptr) ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 -; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST]](s64) +; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST]](s64) ; CHECK: [[LD2:%[0-9]+]]:_(s64) = G_LOAD %3(p0) :: (load (s64) from %ir.ptr + 8) ; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll index 93811d2..36529be 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator.ll @@ -67,10 +67,10 @@ define void @test_multiple_args(i64 %in) { ; CHECK: G_STORE [[DBL]](s64), [[ADDR]](p0) :: (store (s64) into %ir.addr) ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 -; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ADDR]], [[CST1]](s64) +; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST1]](s64) ; CHECK: G_STORE [[I64]](s64), [[GEP1]](p0) :: (store (s64) into %ir.addr + 8) ; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 -; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ADDR]], [[CST2]](s64) +; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST2]](s64) ; CHECK: G_STORE [[I8]](s8), [[GEP2]](p0) :: (store (s8) into %ir.addr + 16, align 8) ; CHECK: RET_ReallyLR define void @test_struct_formal({double, i64, i8} %in, ptr %addr) { @@ -84,10 +84,10 @@ define void @test_struct_formal({double, i64, i8} %in, ptr %addr) { ; CHECK: [[LD1:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load (s64) from %ir.addr) ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 -; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ADDR]], [[CST1]](s64) +; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST1]](s64) ; CHECK: [[LD2:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: 
(load (s64) from %ir.addr + 8) ; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 -; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ADDR]], [[CST2]](s64) +; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST2]](s64) ; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.addr + 16, align 8) ; CHECK: $d0 = COPY [[LD1]](s64) @@ -103,13 +103,13 @@ define {double, i64, i32} @test_struct_return(ptr %addr) { ; CHECK: %0:_(p0) = COPY $x0 ; CHECK: [[LD1:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.addr) ; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 -; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST1]](s64) +; CHECK: [[GEP1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST1]](s64) ; CHECK: [[LD2:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load (s64) from %ir.addr + 8) ; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 -; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST2]](s64) +; CHECK: [[GEP2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST2]](s64) ; CHECK: [[LD3:%[0-9]+]]:_(s64) = G_LOAD [[GEP2]](p0) :: (load (s64) from %ir.addr + 16) ; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 -; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %0, [[CST3]](s64) +; CHECK: [[GEP3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %0, [[CST3]](s64) ; CHECK: [[LD4:%[0-9]+]]:_(s64) = G_LOAD [[GEP3]](p0) :: (load (s64) from %ir.addr + 24) ; CHECK: $x0 = COPY [[LD1]](s64) @@ -286,7 +286,7 @@ define void @take_128bit_struct(ptr %ptr, [2 x i64] %in) { ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK: [[LO:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.ptr) ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 -; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[ADDR]], [[CST]](s64) +; CHECK: [[GEP:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[ADDR]], [[CST]](s64) ; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p0) :: (load (s64) from %ir.ptr + 8) ; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir index 22a5537..1c0fc3f 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir @@ -38,44 +38,44 @@ body: | ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4) ; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4) - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64) ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from %ir.1 + 32, align 4) - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = 
nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 - ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64) ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load (s128) from %ir.1 + 48, align 4) - ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p0) :: (store (s128) into %ir.0 + 48, align 4) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 - ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C3]](s64) ; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD6]](p0) :: (load (s128) from %ir.1 + 64, align 4) - ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64) ; CHECK-NEXT: G_STORE [[LOAD4]](s128), [[PTR_ADD7]](p0) :: (store (s128) into %ir.0 + 64, align 4) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80 - ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C4]](s64) ; CHECK-NEXT: [[LOAD5:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD8]](p0) :: (load (s128) from %ir.1 + 80, align 4) - ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64) ; CHECK-NEXT: G_STORE [[LOAD5]](s128), [[PTR_ADD9]](p0) :: (store (s128) into %ir.0 + 80, align 4) ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 96 - ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C5]](s64) ; CHECK-NEXT: [[LOAD6:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD10]](p0) :: (load (s128) from %ir.1 + 96, align 4) - ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64) ; CHECK-NEXT: G_STORE [[LOAD6]](s128), [[PTR_ADD11]](p0) :: (store (s128) into %ir.0 + 96, align 4) ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 112 - ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C6]](s64) + ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C6]](s64) ; CHECK-NEXT: [[LOAD7:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD12]](p0) :: (load (s128) from %ir.1 + 112, align 4) - ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C6]](s64) + ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64) ; CHECK-NEXT: G_STORE [[LOAD7]](s128), [[PTR_ADD13]](p0) :: (store (s128) into %ir.0 + 112, align 4) ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 127 - ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C7]](s64) + ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C7]](s64) ; CHECK-NEXT: [[LOAD8:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD14]](p0) :: (load (s128) from %ir.1 + 127, align 1, basealign 4) - ; 
CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C7]](s64) + ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64) ; CHECK-NEXT: G_STORE [[LOAD8]](s128), [[PTR_ADD15]](p0) :: (store (s128) into %ir.0 + 127, align 1, basealign 4) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x0 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir index 6039b75..97a0417 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir @@ -111,24 +111,24 @@ body: | ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4) ; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4) - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64) ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from %ir.1 + 32, align 4) - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 - ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64) ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load (s128) from %ir.1 + 48, align 4) - ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p0) :: (store (s128) into %ir.0 + 48, align 4) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 - ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C3]](s64) ; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p0) :: (load (s64) from %ir.1 + 64, align 4) - ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64) ; CHECK-NEXT: G_STORE [[LOAD4]](s64), [[PTR_ADD7]](p0) :: (store (s64) into %ir.0 + 64, align 4) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x0 @@ -159,24 +159,24 @@ body: | ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4) ; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT 
i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4) - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64) ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from %ir.1 + 32, align 4) - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 - ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64) ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load (s128) from %ir.1 + 48, align 4) - ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p0) :: (store (s128) into %ir.0 + 48, align 4) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 - ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C3]](s64) ; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p0) :: (load (s64) from %ir.1 + 64, align 4) - ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64) ; CHECK-NEXT: G_STORE [[LOAD4]](s64), [[PTR_ADD7]](p0) :: (store (s64) into %ir.0 + 64, align 4) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x0 @@ -235,44 +235,44 @@ body: | ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4) ; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4) - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64) ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from %ir.1 
+ 32, align 4) - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 - ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64) ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load (s128) from %ir.1 + 48, align 4) - ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p0) :: (store (s128) into %ir.0 + 48, align 4) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 - ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C3]](s64) ; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD6]](p0) :: (load (s128) from %ir.1 + 64, align 4) - ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64) ; CHECK-NEXT: G_STORE [[LOAD4]](s128), [[PTR_ADD7]](p0) :: (store (s128) into %ir.0 + 64, align 4) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80 - ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C4]](s64) ; CHECK-NEXT: [[LOAD5:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD8]](p0) :: (load (s128) from %ir.1 + 80, align 4) - ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64) ; CHECK-NEXT: G_STORE [[LOAD5]](s128), [[PTR_ADD9]](p0) :: (store (s128) into %ir.0 + 80, align 4) ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 96 - ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C5]](s64) ; CHECK-NEXT: [[LOAD6:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD10]](p0) :: (load (s128) from %ir.1 + 96, align 4) - ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64) ; CHECK-NEXT: G_STORE [[LOAD6]](s128), [[PTR_ADD11]](p0) :: (store (s128) into %ir.0 + 96, align 4) ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 112 - ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C6]](s64) + ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C6]](s64) ; CHECK-NEXT: [[LOAD7:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD12]](p0) :: (load (s128) from %ir.1 + 112, align 4) - ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C6]](s64) + ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64) ; CHECK-NEXT: G_STORE [[LOAD7]](s128), [[PTR_ADD13]](p0) :: (store (s128) into %ir.0 + 112, align 4) ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 127 - ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C7]](s64) + ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], 
[[C7]](s64) ; CHECK-NEXT: [[LOAD8:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD14]](p0) :: (load (s128) from %ir.1 + 127, align 1, basealign 4) - ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C7]](s64) + ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64) ; CHECK-NEXT: G_STORE [[LOAD8]](s128), [[PTR_ADD15]](p0) :: (store (s128) into %ir.0 + 127, align 1, basealign 4) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x0 @@ -303,24 +303,24 @@ body: | ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p2) :: (load (s128) from %ir.1, align 4, addrspace 2) ; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p1) :: (store (s128) into %ir.0, align 4, addrspace 1) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p2) = nuw G_PTR_ADD [[COPY1]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p2) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p2) :: (load (s128) from %ir.1 + 16, align 4, addrspace 2) - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p1) :: (store (s128) into %ir.0 + 16, align 4, addrspace 1) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p2) = nuw G_PTR_ADD [[COPY1]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p2) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64) ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p2) :: (load (s128) from %ir.1 + 32, align 4, addrspace 2) - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p1) :: (store (s128) into %ir.0 + 32, align 4, addrspace 1) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 - ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p2) = nuw G_PTR_ADD [[COPY1]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p2) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64) ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p2) :: (load (s128) from %ir.1 + 48, align 4, addrspace 2) - ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw G_PTR_ADD [[COPY]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p1) :: (store (s128) into %ir.0 + 48, align 4, addrspace 1) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 - ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p2) = nuw G_PTR_ADD [[COPY1]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p2) = nuw inbounds G_PTR_ADD [[COPY1]], [[C3]](s64) ; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p2) :: (load (s64) from %ir.1 + 64, align 4, addrspace 2) - ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw G_PTR_ADD [[COPY]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64) ; CHECK-NEXT: G_STORE [[LOAD4]](s64), [[PTR_ADD7]](p1) :: (store (s64) into %ir.0 + 64, align 4, addrspace 1) ; CHECK-NEXT: RET_ReallyLR %0:_(p1) = COPY $x0 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir index 8808c9b..fc4fbac 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir +++ 
b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir @@ -89,17 +89,17 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64) ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD1]](p0) :: (load (s128) from %ir.1 + 32, align 4) ; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD2]](p0) :: (store (s128) into %ir.0 + 16, align 4) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64) ; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x0 @@ -124,35 +124,35 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64) ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD1]](p0) :: (load (s128) from %ir.1 + 32, align 4) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64) ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from %ir.1 + 48, align 4) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C3]](s64) ; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD3]](p0) :: (load (s128) from %ir.1 + 64, align 4) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80 - ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C4]](s64) ; CHECK-NEXT: [[LOAD5:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load (s128) from %ir.1 + 80, align 4) ; 
CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4) ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64) ; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD5]](p0) :: (store (s128) into %ir.0 + 16, align 4) ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C6]](s64) + ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C6]](s64) ; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD6]](p0) :: (store (s128) into %ir.0 + 32, align 4) ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 - ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C7]](s64) + ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C7]](s64) ; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD7]](p0) :: (store (s128) into %ir.0 + 48, align 4) ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 64 - ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C8]](s64) + ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C8]](s64) ; CHECK-NEXT: G_STORE [[LOAD4]](s128), [[PTR_ADD8]](p0) :: (store (s128) into %ir.0 + 64, align 4) ; CHECK-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 80 - ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C9]](s64) + ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C9]](s64) ; CHECK-NEXT: G_STORE [[LOAD5]](s128), [[PTR_ADD9]](p0) :: (store (s128) into %ir.0 + 80, align 4) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x0 @@ -177,23 +177,23 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64) ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD1]](p0) :: (load (s128) from %ir.1 + 32, align 4) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64) ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.1 + 48) ; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64) ; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 16, align 4) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds 
G_PTR_ADD [[COPY]], [[C4]](s64) ; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD4]](p0) :: (store (s128) into %ir.0 + 32, align 4) ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 - ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C5]](s64) ; CHECK-NEXT: G_STORE [[LOAD3]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %ir.0 + 48) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x0 @@ -218,17 +218,17 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p2) = COPY $x1 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p2) :: (load (s128) from %ir.1, align 4, addrspace 2) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p2) = nuw G_PTR_ADD [[COPY1]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p2) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p2) :: (load (s128) from %ir.1 + 16, align 4, addrspace 2) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p2) = nuw G_PTR_ADD [[COPY1]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p2) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64) ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD1]](p2) :: (load (s128) from %ir.1 + 32, align 4, addrspace 2) ; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p1) :: (store (s128) into %ir.0, align 4, addrspace 1) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw G_PTR_ADD [[COPY]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD2]](p1) :: (store (s128) into %ir.0 + 16, align 4, addrspace 1) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw G_PTR_ADD [[COPY]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64) ; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p1) :: (store (s128) into %ir.0 + 32, align 4, addrspace 1) ; CHECK-NEXT: RET_ReallyLR %0:_(p1) = COPY $x0 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir index f96205c..b06cadf 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir @@ -100,7 +100,7 @@ body: | ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]] ; CHECK-NEXT: G_STORE [[MUL]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[MUL]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x0 @@ -127,13 +127,13 @@ body: | ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64) ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>) into %ir.dst, align 1) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD]](p0) :: 
(store (<2 x s64>) into %ir.dst + 16, align 1) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (<2 x s64>) into %ir.dst + 32, align 1) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64) ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD2]](p0) :: (store (<2 x s64>) into %ir.dst + 48, align 1) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x0 @@ -160,7 +160,7 @@ body: | ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4629771061636907072 ; CHECK-NEXT: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x0 @@ -190,13 +190,13 @@ body: | ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MUL]](s64), [[MUL]](s64) ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>) into %ir.dst, align 1) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into %ir.dst + 16, align 1) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (<2 x s64>) into %ir.dst + 32, align 1) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 44 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64) ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD2]](p0) :: (store (<2 x s64>) into %ir.dst + 44, align 1) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x0 @@ -222,11 +222,11 @@ body: | ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4629771061636907072 ; CHECK-NEXT: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[C]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 16448 ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64) ; CHECK-NEXT: G_STORE [[C2]](s16), [[PTR_ADD1]](p0) :: (store (s16) into %ir.dst + 16, align 1) ; CHECK-NEXT: 
RET_ReallyLR %0:_(p0) = COPY $x0 @@ -254,7 +254,7 @@ body: | ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]] ; CHECK-NEXT: G_STORE [[MUL]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[MUL]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x0 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir index 282a4a5..7393091 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir @@ -46,9 +46,9 @@ body: | ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4) ; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4) - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x0 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-and.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-and.mir index a92f39e..1a21064 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-and.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-and.mir @@ -32,11 +32,11 @@ body: | ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 16) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C1]](s64) ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0) ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16) from unknown-address + 8, align 8) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 10, align 2) ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD1]](s32), [[DEF]](s32) @@ -48,7 +48,7 @@ body: | ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 16) ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0) ; CHECK-NEXT: [[ZEXTLOAD1:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY1]](p0) :: (load (s16) from unknown-address + 8, align 8) - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64) ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s32) = 
G_LOAD [[PTR_ADD2]](p0) :: (load (s8) from unknown-address + 10, align 2) ; CHECK-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD3]](s32), [[DEF]](s32) ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[MV1]], [[C3]](s64) @@ -61,7 +61,7 @@ body: | ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[AND1]](s64) ; CHECK-NEXT: G_STORE [[COPY2]](s64), %ptr(p0) :: (store (s64), align 16) ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC]], [[C3]](s64) - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[PTR_ADD]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 8, align 8) ; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD3]](p0) :: (store (s8) into unknown-address + 10, align 2) %ptr:_(p0) = COPY $x0 @@ -96,16 +96,16 @@ body: | ; CHECK-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[AND4]], [[C1]] ; CHECK-NEXT: G_STORE [[AND5]](s64), %ptr(p0) :: (store (s64), align 64) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64) ; CHECK-NEXT: G_STORE [[AND6]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64) ; CHECK-NEXT: G_STORE [[AND7]](s64), [[PTR_ADD1]](p0) :: (store (s64) into unknown-address + 16, align 16) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64) ; CHECK-NEXT: G_STORE [[AND8]](s64), [[PTR_ADD2]](p0) :: (store (s64) into unknown-address + 24) ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64) ; CHECK-NEXT: G_STORE [[AND9]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 32, align 32) ; CHECK-NEXT: RET_ReallyLR implicit $x0 %a:_(s318) = G_IMPLICIT_DEF @@ -140,16 +140,16 @@ body: | ; CHECK-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[AND4]], [[C1]] ; CHECK-NEXT: G_STORE [[AND5]](s64), %ptr(p0) :: (store (s64), align 64) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64) ; CHECK-NEXT: G_STORE [[AND6]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64) ; CHECK-NEXT: G_STORE [[AND7]](s64), [[PTR_ADD1]](p0) :: (store (s64) into unknown-address + 16, align 16) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64) ; CHECK-NEXT: G_STORE [[AND8]](s64), [[PTR_ADD2]](p0) :: (store (s64) into unknown-address + 24) ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - 
; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64) ; CHECK-NEXT: G_STORE [[AND9]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 32, align 32) ; CHECK-NEXT: RET_ReallyLR implicit $x0 %a:_(s318) = G_IMPLICIT_DEF diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bswap.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bswap.mir index da67bc9..2378401 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bswap.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bswap.mir @@ -195,13 +195,13 @@ body: | ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C1]](s64) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[UV]](s64), [[COPY]](p0) :: (store (s32), align 16) ; CHECK-NEXT: G_STORE [[LSHR1]](s64), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 4, align 4) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 6 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64) ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C1]](s64) - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[PTR_ADD1]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD1]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[UV1]](s64), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 6, align 2) ; CHECK-NEXT: G_STORE [[LSHR2]](s64), [[PTR_ADD2]](p0) :: (store (s16) into unknown-address + 10) ; CHECK-NEXT: RET_ReallyLR diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir index e294ea2..c301e76 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir @@ -97,16 +97,16 @@ body: | ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[C1]], [[C3]] ; CHECK-NEXT: G_STORE [[AND]](s64), %ptr(p0) :: (store (s64), align 64) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64) ; CHECK-NEXT: G_STORE [[AND1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8) ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64) ; CHECK-NEXT: G_STORE [[AND2]](s64), [[PTR_ADD1]](p0) :: (store (s64) into unknown-address + 16, align 16) ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C6]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C6]](s64) ; CHECK-NEXT: G_STORE [[AND3]](s64), [[PTR_ADD2]](p0) :: (store (s64) into unknown-address + 24) ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C7]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C7]](s64) ; CHECK-NEXT: G_STORE [[AND4]](s64), 
[[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 32, align 32) ; CHECK-NEXT: RET_ReallyLR implicit $w0 %cst:_(s318) = G_CONSTANT i318 1234 @@ -136,10 +136,10 @@ body: | ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[AND2]](s64), 0 ; CHECK-NEXT: G_STORE [[COPY]](s64), %ptr(p0) :: (store (s64), align 32) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64) ; CHECK-NEXT: G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8) ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64) ; CHECK-NEXT: G_STORE [[EXTRACT]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 16, align 16) ; CHECK-NEXT: RET_ReallyLR implicit $w0 %cst:_(s158) = G_CONSTANT i158 1234 @@ -170,10 +170,10 @@ body: | ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(s16) = G_EXTRACT [[AND2]](s64), 0 ; CHECK-NEXT: G_STORE [[COPY]](s64), %ptr(p0) :: (store (s64), align 32) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64) ; CHECK-NEXT: G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8) ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64) ; CHECK-NEXT: G_STORE [[EXTRACT]](s16), [[PTR_ADD1]](p0) :: (store (s16) into unknown-address + 16, align 16) ; CHECK-NEXT: RET_ReallyLR implicit $w0 %cst:_(s142) = G_CONSTANT i142 1234 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-extract-vector-elt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-extract-vector-elt.mir index 16cfb72..dafc304 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-extract-vector-elt.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-extract-vector-elt.mir @@ -328,7 +328,7 @@ body: | ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0 ; CHECK-NEXT: G_STORE [[COPY]](<2 x s64>), [[FRAME_INDEX]](p0) :: (store (<2 x s64>) into %stack.0, align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) ; CHECK-NEXT: G_STORE [[COPY1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into %stack.0 + 16, basealign 32) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND %idx, [[C1]] @@ -426,7 +426,7 @@ body: | ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0 ; CHECK-NEXT: G_STORE [[COPY]](<4 x s32>), [[FRAME_INDEX]](p0) :: (store (<4 x s32>) into %stack.0, align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) ; CHECK-NEXT: G_STORE [[COPY1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into %stack.0 + 16, basealign 32) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 7 ; 
CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND %idxprom, [[C1]] @@ -460,7 +460,7 @@ body: | ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0 ; CHECK-NEXT: G_STORE [[COPY]](<8 x s16>), [[FRAME_INDEX]](p0) :: (store (<8 x s16>) into %stack.0, align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) ; CHECK-NEXT: G_STORE [[COPY1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into %stack.0 + 16, basealign 32) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15 ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND %idxprom, [[C1]] @@ -495,7 +495,7 @@ body: | ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[DEF]](<2 x p0>) ; CHECK-NEXT: G_STORE [[BITCAST]](<2 x s64>), [[FRAME_INDEX]](p0) :: (store (<2 x s64>) into %stack.0, align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[DEF]](<2 x p0>) ; CHECK-NEXT: G_STORE [[BITCAST1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into %stack.0 + 16, basealign 32) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir index 01caebe..1c10e08 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir @@ -22,7 +22,7 @@ body: | ; CHECK-NEXT: [[FPEXT1:%[0-9]+]]:_(<2 x s64>) = G_FPEXT [[UV1]](<2 x s32>) ; CHECK-NEXT: G_STORE [[FPEXT]](<2 x s64>), [[COPY1]](p0) :: (store (<2 x s64>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64) ; CHECK-NEXT: G_STORE [[FPEXT1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16) ; CHECK-NEXT: RET_ReallyLR %0:_(<4 x s32>) = COPY $q0 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptrunc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptrunc.mir index 92b8339..a19ab0b 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptrunc.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptrunc.mir @@ -135,7 +135,7 @@ body: | ; CHECK-NEXT: [[CONCAT_VECTORS1:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[FPTRUNC2]](<2 x s32>), [[FPTRUNC3]](<2 x s32>) ; CHECK-NEXT: G_STORE [[CONCAT_VECTORS]](<4 x s32>), [[COPY5]](p0) :: (store (<4 x s32>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY5]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY5]], [[C]](s64) ; CHECK-NEXT: G_STORE [[CONCAT_VECTORS1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16) ; CHECK-NEXT: RET_ReallyLR %2:_(<2 x s64>) = COPY $q0 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir index 89b9fa5..858a5a2 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir +++ 
b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-insert-vector-elt.mir @@ -258,10 +258,10 @@ body: | ; CHECK-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UITOFP]](<4 x s32>) ; CHECK-NEXT: G_STORE [[UV10]](s32), [[COPY]](p0) :: (store (s32), align 16) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C3]](s64) ; CHECK-NEXT: G_STORE [[UV11]](s32), [[PTR_ADD]](p0) :: (store (s32) into unknown-address + 4) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C4]](s64) ; CHECK-NEXT: G_STORE [[UV12]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 8, align 8) ; CHECK-NEXT: G_BR %bb.1 bb.1: diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector.mir index bb28644..29a3e38 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector.mir @@ -46,7 +46,7 @@ body: | ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64) ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>)) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[BUILD_VECTOR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x8 @@ -72,7 +72,7 @@ body: | ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64) ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>)) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[BUILD_VECTOR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x8 @@ -95,7 +95,7 @@ body: | ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 ; CHECK-NEXT: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64), align 16) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[C1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x8 @@ -140,7 +140,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x8 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>)) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16) 
; CHECK-NEXT: $q0 = COPY [[LOAD]](<2 x s64>) ; CHECK-NEXT: $q1 = COPY [[LOAD1]](<2 x s64>) @@ -166,7 +166,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x8 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>)) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16) ; CHECK-NEXT: $q0 = COPY [[LOAD]](<2 x s64>) ; CHECK-NEXT: $q1 = COPY [[LOAD1]](<2 x s64>) diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir index 0351562..2c326902 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir @@ -332,7 +332,7 @@ body: | ; CHECK-NEXT: %ptr:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[DEF]](<16 x s8>), %ptr(p0) :: (store (<16 x s8>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64) ; CHECK-NEXT: G_STORE [[DEF]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16) ; CHECK-NEXT: RET_ReallyLR %val:_(<32 x s8>) = G_IMPLICIT_DEF @@ -355,7 +355,7 @@ body: | ; CHECK-NEXT: %ptr:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[DEF]](<8 x s16>), %ptr(p0) :: (store (<8 x s16>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64) ; CHECK-NEXT: G_STORE [[DEF]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16) ; CHECK-NEXT: RET_ReallyLR %val:_(<16 x s16>) = G_IMPLICIT_DEF @@ -378,7 +378,7 @@ body: | ; CHECK-NEXT: %ptr:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[DEF]](<4 x s32>), %ptr(p0) :: (store (<4 x s32>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64) ; CHECK-NEXT: G_STORE [[DEF]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16) ; CHECK-NEXT: RET_ReallyLR %val:_(<8 x s32>) = G_IMPLICIT_DEF @@ -401,7 +401,7 @@ body: | ; CHECK-NEXT: %ptr:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[DEF]](<2 x s64>), %ptr(p0) :: (store (<2 x s64>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64) ; CHECK-NEXT: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16) ; CHECK-NEXT: RET_ReallyLR %val:_(<4 x s64>) = G_IMPLICIT_DEF @@ -423,10 +423,10 @@ body: | ; CHECK-NEXT: %ptr:_(p0) = COPY $x0 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD %ptr(p0) :: (load (<16 x s8>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD 
[[PTR_ADD]](p0) :: (load (<16 x s8>) from unknown-address + 16) ; CHECK-NEXT: G_STORE [[LOAD]](<16 x s8>), %ptr(p0) :: (store (<16 x s8>), align 32) - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64) ; CHECK-NEXT: G_STORE [[LOAD1]](<16 x s8>), [[PTR_ADD1]](p0) :: (store (<16 x s8>) into unknown-address + 16) ; CHECK-NEXT: RET_ReallyLR %ptr:_(p0) = COPY $x0 @@ -448,10 +448,10 @@ body: | ; CHECK-NEXT: %ptr:_(p0) = COPY $x0 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD %ptr(p0) :: (load (<8 x s16>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[PTR_ADD]](p0) :: (load (<8 x s16>) from unknown-address + 16) ; CHECK-NEXT: G_STORE [[LOAD]](<8 x s16>), %ptr(p0) :: (store (<8 x s16>), align 32) - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64) ; CHECK-NEXT: G_STORE [[LOAD1]](<8 x s16>), [[PTR_ADD1]](p0) :: (store (<8 x s16>) into unknown-address + 16) ; CHECK-NEXT: RET_ReallyLR %ptr:_(p0) = COPY $x0 @@ -473,10 +473,10 @@ body: | ; CHECK-NEXT: %ptr:_(p0) = COPY $x0 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16) ; CHECK-NEXT: G_STORE [[LOAD]](<4 x s32>), %ptr(p0) :: (store (<4 x s32>), align 32) - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64) ; CHECK-NEXT: G_STORE [[LOAD1]](<4 x s32>), [[PTR_ADD1]](p0) :: (store (<4 x s32>) into unknown-address + 16) ; CHECK-NEXT: RET_ReallyLR %ptr:_(p0) = COPY $x0 @@ -498,10 +498,10 @@ body: | ; CHECK-NEXT: %ptr:_(p0) = COPY $x0 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr(p0) :: (load (<2 x s64>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16) ; CHECK-NEXT: G_STORE [[LOAD]](<2 x s64>), %ptr(p0) :: (store (<2 x s64>), align 32) - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64) ; CHECK-NEXT: G_STORE [[LOAD1]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (<2 x s64>) into unknown-address + 16) ; CHECK-NEXT: RET_ReallyLR %ptr:_(p0) = COPY $x0 @@ -549,10 +549,10 @@ body: | ; CHECK-NEXT: %ptr:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[DEF]](<2 x s64>), %ptr(p0) :: (store (<2 x s64>)) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64) ; CHECK-NEXT: G_STORE [[DEF]](<2 x 
s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C1]](s64) ; CHECK-NEXT: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (<2 x s64>) into unknown-address + 32) ; CHECK-NEXT: RET_ReallyLR %val:_(<6 x s64>) = G_IMPLICIT_DEF @@ -575,7 +575,7 @@ body: | ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>) ; CHECK-NEXT: G_STORE [[UV]](s16), [[COPY]](p0) :: (store (s16), align 4) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[UV1]](s16), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 2) ; CHECK-NEXT: RET_ReallyLR %0:_(p0) = COPY $x0 @@ -597,7 +597,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p0) :: (load (s16), align 4) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 2) ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16) ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD1]](s16) @@ -626,10 +626,10 @@ body: | ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 16) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C1]](s64) ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[PTR_ADD]](p0) :: (load (s16) from unknown-address + 8, align 8) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[PTR_ADD]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C2]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s8) from unknown-address + 10, align 2) ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD1]](s32), [[DEF]](s32) @@ -641,9 +641,9 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[OR1]](s64) ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[OR2]](s64) ; CHECK-NEXT: G_STORE [[COPY]](s64), %ptr(p0) :: (store (s64), align 16) - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C1]](s64) ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC]], [[C3]](s64) - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[PTR_ADD2]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD2]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[PTR_ADD2]](p0) :: (store (s16) into unknown-address + 8, align 8) ; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD3]](p0) :: (store (s8) into unknown-address + 10, align 2) ; CHECK-NEXT: RET_ReallyLR @@ -710,19 +710,19 @@ body: | ; CHECK-NEXT: 
[[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr(p0) :: (load (<2 x s64>), align 64) ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD]](<2 x s64>) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16) ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD1]](<2 x s64>) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C1]](s64) ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<2 x s64>) from unknown-address + 32, align 32) ; CHECK-NEXT: [[BITCAST2:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD2]](<2 x s64>) ; CHECK-NEXT: [[BITCAST3:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BITCAST]](<2 x p0>) ; CHECK-NEXT: G_STORE [[BITCAST3]](<2 x s64>), %ptr(p0) :: (store (<2 x s64>), align 64) - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C]](s64) ; CHECK-NEXT: [[BITCAST4:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BITCAST1]](<2 x p0>) ; CHECK-NEXT: G_STORE [[BITCAST4]](<2 x s64>), [[PTR_ADD2]](p0) :: (store (<2 x s64>) into unknown-address + 16) - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C1]](s64) ; CHECK-NEXT: [[BITCAST5:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[BITCAST2]](<2 x p0>) ; CHECK-NEXT: G_STORE [[BITCAST5]](<2 x s64>), [[PTR_ADD3]](p0) :: (store (<2 x s64>) into unknown-address + 32, align 32) ; CHECK-NEXT: RET_ReallyLR diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir index b9c0845..30afd7e 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir @@ -61,7 +61,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[SMIN]](<16 x s8>), [[COPY]](p0) :: (store (<16 x s8>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[SMIN1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16) %vec:_(<32 x s8>) = G_IMPLICIT_DEF %vec1:_(<32 x s8>) = G_IMPLICIT_DEF @@ -130,7 +130,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[SMIN]](<8 x s16>), [[COPY]](p0) :: (store (<8 x s16>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[SMIN1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16) %vec:_(<16 x s16>) = G_IMPLICIT_DEF %vec1:_(<16 x s16>) = G_IMPLICIT_DEF @@ -199,7 +199,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[SMIN]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = 
G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[SMIN1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16) %vec:_(<8 x s32>) = G_IMPLICIT_DEF %vec1:_(<8 x s32>) = G_IMPLICIT_DEF @@ -262,7 +262,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[OR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>), align 32) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16) %vec:_(<4 x s64>) = G_IMPLICIT_DEF %vec1:_(<4 x s64>) = G_IMPLICIT_DEF @@ -331,7 +331,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[UMIN]](<16 x s8>), [[COPY]](p0) :: (store (<16 x s8>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[UMIN1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16) %vec:_(<32 x s8>) = G_IMPLICIT_DEF %vec1:_(<32 x s8>) = G_IMPLICIT_DEF @@ -400,7 +400,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[UMIN]](<8 x s16>), [[COPY]](p0) :: (store (<8 x s16>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[UMIN1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16) %vec:_(<16 x s16>) = G_IMPLICIT_DEF %vec1:_(<16 x s16>) = G_IMPLICIT_DEF @@ -469,7 +469,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[UMIN]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[UMIN1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16) %vec:_(<8 x s32>) = G_IMPLICIT_DEF %vec1:_(<8 x s32>) = G_IMPLICIT_DEF @@ -532,7 +532,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[OR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>), align 32) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16) %vec:_(<4 x s64>) = G_IMPLICIT_DEF %vec1:_(<4 x s64>) = G_IMPLICIT_DEF @@ -623,7 +623,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[SMAX]](<16 x s8>), [[COPY]](p0) :: (store (<16 x s8>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw 
inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[SMAX1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16) %vec:_(<32 x s8>) = G_IMPLICIT_DEF %vec1:_(<32 x s8>) = G_IMPLICIT_DEF @@ -670,7 +670,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[SMAX]](<8 x s16>), [[COPY]](p0) :: (store (<8 x s16>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[SMAX1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16) %vec:_(<16 x s16>) = G_IMPLICIT_DEF %vec1:_(<16 x s16>) = G_IMPLICIT_DEF @@ -739,7 +739,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[SMAX]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[SMAX1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16) %vec:_(<8 x s32>) = G_IMPLICIT_DEF %vec1:_(<8 x s32>) = G_IMPLICIT_DEF @@ -802,7 +802,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[OR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>), align 32) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16) %vec:_(<4 x s64>) = G_IMPLICIT_DEF %vec1:_(<4 x s64>) = G_IMPLICIT_DEF @@ -871,7 +871,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[UMAX]](<16 x s8>), [[COPY]](p0) :: (store (<16 x s8>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[UMAX1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16) %vec:_(<32 x s8>) = G_IMPLICIT_DEF %vec1:_(<32 x s8>) = G_IMPLICIT_DEF @@ -940,7 +940,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[UMAX]](<8 x s16>), [[COPY]](p0) :: (store (<8 x s16>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[UMAX1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16) %vec:_(<16 x s16>) = G_IMPLICIT_DEF %vec1:_(<16 x s16>) = G_IMPLICIT_DEF @@ -1009,7 +1009,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[UMAX]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-NEXT: G_STORE [[UMAX1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 
16) %vec:_(<8 x s32>) = G_IMPLICIT_DEF %vec1:_(<8 x s32>) = G_IMPLICIT_DEF @@ -1072,7 +1072,7 @@ body: | ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 ; CHECK-NEXT: G_STORE [[OR]](<2 x s64>), [[COPY]](p0) :: (store (<2 x s64>), align 32) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16) %vec:_(<4 x s64>) = G_IMPLICIT_DEF %vec1:_(<4 x s64>) = G_IMPLICIT_DEF diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-non-pow2-load-store.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-non-pow2-load-store.mir index 4c9652b..b6488e9 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-non-pow2-load-store.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-non-pow2-load-store.mir @@ -16,13 +16,13 @@ body: | ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16), align 4) ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from unknown-address + 2, align 2) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C2]](s64) ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]] ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[OR]], [[C2]](s64) - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C1]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64) ; CHECK-NEXT: G_STORE [[OR]](s32), [[COPY1]](p0) :: (store (s16), align 4) ; CHECK-NEXT: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store (s8) into unknown-address + 2, align 2) ; CHECK-NEXT: $w0 = COPY [[C]](s32) @@ -54,13 +54,13 @@ body: | ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[C]], [[C1]](s64) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s32), align 8) ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC]], [[C3]](s64) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[PTR_ADD]], [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[PTR_ADD]], [[C4]](s64) ; CHECK-NEXT: G_STORE [[TRUNC]](s32), [[PTR_ADD]](p0) :: (store (s16) into unknown-address + 4, align 4) ; CHECK-NEXT: G_STORE [[LSHR1]](s32), [[PTR_ADD1]](p0) :: (store (s8) into unknown-address + 6, align 2) ; CHECK-NEXT: RET_ReallyLR @@ -91,16 +91,16 @@ body: | ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[DEF]], [[C1]] ; CHECK-NEXT: G_STORE [[AND]](s64), %ptr(p0) :: (store (s64)) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C2]](s64) + ; CHECK-NEXT: 
[[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64) ; CHECK-NEXT: G_STORE [[AND1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64) ; CHECK-NEXT: G_STORE [[AND2]](s64), [[PTR_ADD1]](p0) :: (store (s64) into unknown-address + 16) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64) ; CHECK-NEXT: G_STORE [[AND3]](s64), [[PTR_ADD2]](p0) :: (store (s64) into unknown-address + 24) ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64) ; CHECK-NEXT: G_STORE [[AND4]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 32) ; CHECK-NEXT: RET_ReallyLR %ptr:_(p0) = COPY $x0 @@ -130,10 +130,10 @@ body: | ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[AND2]](s64), 0 ; CHECK-NEXT: G_STORE [[COPY]](s64), %ptr(p0) :: (store (s64)) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64) ; CHECK-NEXT: G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64) ; CHECK-NEXT: G_STORE [[EXTRACT]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 16, align 8) ; CHECK-NEXT: RET_ReallyLR %ptr:_(p0) = COPY $x0 @@ -163,10 +163,10 @@ body: | ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(s16) = G_EXTRACT [[AND2]](s64), 0 ; CHECK-NEXT: G_STORE [[COPY]](s64), %ptr(p0) :: (store (s64)) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64) ; CHECK-NEXT: G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64) ; CHECK-NEXT: G_STORE [[EXTRACT]](s16), [[PTR_ADD1]](p0) :: (store (s16) into unknown-address + 16, align 8) ; CHECK-NEXT: RET_ReallyLR %ptr:_(p0) = COPY $x0 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-or.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-or.mir index 06cbec1..9edc1cb 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-or.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-or.mir @@ -84,16 +84,16 @@ body: | ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[OR4]], [[C1]] ; CHECK-NEXT: G_STORE [[AND]](s64), %ptr(p0) :: (store (s64), align 64) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64) ; CHECK-NEXT: G_STORE [[AND1]](s64), 
[[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64) ; CHECK-NEXT: G_STORE [[AND2]](s64), [[PTR_ADD1]](p0) :: (store (s64) into unknown-address + 16, align 16) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64) ; CHECK-NEXT: G_STORE [[AND3]](s64), [[PTR_ADD2]](p0) :: (store (s64) into unknown-address + 24) ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64) ; CHECK-NEXT: G_STORE [[AND4]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 32, align 32) ; CHECK-NEXT: RET_ReallyLR implicit $x0 %a:_(s318) = G_IMPLICIT_DEF diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir index 86261bd..47aa570 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir @@ -715,7 +715,7 @@ body: | ; CHECK-NEXT: %ptr2:_(p0) = COPY $x0 ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr1(p0) :: (load (<2 x s64>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr1, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr1, [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16) ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 @@ -728,7 +728,7 @@ body: | ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr2(p0) :: (load (<2 x s64>), align 32) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr2, [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr2, [[C2]](s64) ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<2 x s64>) from unknown-address + 16) ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2: @@ -903,7 +903,7 @@ body: | ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr1(p0) :: (load (<2 x s64>), align 32) ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD]](<2 x s64>) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr1, [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr1, [[C]](s64) ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16) ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD1]](<2 x s64>) ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF @@ -918,7 +918,7 @@ body: | ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr2(p0) :: (load (<2 x s64>), align 32) ; CHECK-NEXT: [[BITCAST2:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD2]](<2 x s64>) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr2, [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw 
inbounds G_PTR_ADD %ptr2, [[C2]](s64) ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<2 x s64>) from unknown-address + 16) ; CHECK-NEXT: [[BITCAST3:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD3]](<2 x s64>) ; CHECK-NEXT: {{ $}} diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector.mir index 0ab0487..2e70252 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector.mir @@ -165,7 +165,7 @@ body: | ; CHECK-NEXT: [[SHUF1:%[0-9]+]]:_(<2 x s64>) = G_SHUFFLE_VECTOR [[COPY3]](<2 x s64>), [[COPY]], shufflemask(1, 2) ; CHECK-NEXT: G_STORE [[SHUF]](<2 x s64>), [[COPY4]](p0) :: (store (<2 x s64>), align 32) ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY4]], [[C]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY4]], [[C]](s64) ; CHECK-NEXT: G_STORE [[SHUF1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16) ; CHECK-NEXT: RET_ReallyLR %3:_(<2 x s64>) = COPY $q0 @@ -208,7 +208,7 @@ body: | ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[COPY1]](<4 x s32>), [[COPY]], shufflemask(2, 6, 5, 3) ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY4]](p0) :: (store (<4 x s32>), align 32) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY4]], [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY4]], [[C4]](s64) ; CHECK-NEXT: G_STORE [[SHUF]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16) ; CHECK-NEXT: RET_ReallyLR %3:_(<4 x s32>) = COPY $q0 @@ -271,10 +271,10 @@ body: | ; CHECK-NEXT: [[BUILD_VECTOR7:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[EVEC2]](s64), [[EVEC3]](s64) ; CHECK-NEXT: G_STORE [[BUILD_VECTOR6]](<2 x s64>), [[COPY8]](p0) :: (store (<2 x s64>), align 64) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY8]], [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY8]], [[C2]](s64) ; CHECK-NEXT: G_STORE [[BUILD_VECTOR7]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY8]], [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY8]], [[C3]](s64) ; CHECK-NEXT: G_STORE [[SHUF]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (<2 x s64>) into unknown-address + 32, align 32) ; CHECK-NEXT: RET_ReallyLR %3:_(s64) = COPY $d0 @@ -458,7 +458,7 @@ body: | ; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR2]](<2 x s32>), [[BUILD_VECTOR3]](<2 x s32>) ; CHECK-NEXT: G_STORE [[CONCAT_VECTORS]](<4 x s32>), [[COPY8]](p0) :: (store (<4 x s32>), align 32) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY8]], [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY8]], [[C4]](s64) ; CHECK-NEXT: G_STORE [[BUILD_VECTOR4]](<2 x s32>), [[PTR_ADD]](p0) :: (store (<2 x s32>) into unknown-address + 16, align 16) ; CHECK-NEXT: RET_ReallyLR %3:_(s32) = COPY $s0 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vacopy.mir 
b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vacopy.mir index 5ada2dd..4f93f69 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vacopy.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vacopy.mir @@ -24,20 +24,20 @@ body: | ; CHECK-LINUX-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1 ; CHECK-LINUX-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY1]](p0) :: (load (s64)) ; CHECK-LINUX-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-LINUX-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C]](s64) + ; CHECK-LINUX-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64) ; CHECK-LINUX-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 8) ; CHECK-LINUX-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-LINUX-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C1]](s64) + ; CHECK-LINUX-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C1]](s64) ; CHECK-LINUX-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load (s64) from unknown-address + 16) ; CHECK-LINUX-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 - ; CHECK-LINUX-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY1]], [[C2]](s64) + ; CHECK-LINUX-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY1]], [[C2]](s64) ; CHECK-LINUX-NEXT: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p0) :: (load (s64) from unknown-address + 24) ; CHECK-LINUX-NEXT: G_STORE [[LOAD]](s64), [[COPY]](p0) :: (store (s64)) - ; CHECK-LINUX-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK-LINUX-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; CHECK-LINUX-NEXT: G_STORE [[LOAD1]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 8) - ; CHECK-LINUX-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C1]](s64) + ; CHECK-LINUX-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C1]](s64) ; CHECK-LINUX-NEXT: G_STORE [[LOAD2]](s64), [[PTR_ADD4]](p0) :: (store (s64) into unknown-address + 16) - ; CHECK-LINUX-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C2]](s64) + ; CHECK-LINUX-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD [[COPY]], [[C2]](s64) ; CHECK-LINUX-NEXT: G_STORE [[LOAD3]](s64), [[PTR_ADD5]](p0) :: (store (s64) into unknown-address + 24) ; CHECK-LINUX-NEXT: RET_ReallyLR %0:_(p0) = COPY $x0 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir index 3b70cfb..1e1ae01 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir @@ -46,16 +46,16 @@ body: | ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[XOR4]], [[C1]] ; CHECK-NEXT: G_STORE [[AND]](s64), %ptr(p0) :: (store (s64), align 64) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64) ; CHECK-NEXT: G_STORE [[AND1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64) ; CHECK-NEXT: G_STORE [[AND2]](s64), [[PTR_ADD1]](p0) :: (store (s64) into unknown-address + 16, align 16) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = 
G_CONSTANT i64 24 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64) ; CHECK-NEXT: G_STORE [[AND3]](s64), [[PTR_ADD2]](p0) :: (store (s64) into unknown-address + 24) ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64) ; CHECK-NEXT: G_STORE [[AND4]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 32, align 32) ; CHECK-NEXT: RET_ReallyLR implicit $x0 %a:_(s318) = G_IMPLICIT_DEF @@ -90,16 +90,16 @@ body: | ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[XOR4]], [[C1]] ; CHECK-NEXT: G_STORE [[AND]](s64), %ptr(p0) :: (store (s64), align 64) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64) ; CHECK-NEXT: G_STORE [[AND1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64) ; CHECK-NEXT: G_STORE [[AND2]](s64), [[PTR_ADD1]](p0) :: (store (s64) into unknown-address + 16, align 16) ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 - ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C4]](s64) + ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C4]](s64) ; CHECK-NEXT: G_STORE [[AND3]](s64), [[PTR_ADD2]](p0) :: (store (s64) into unknown-address + 24) ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 - ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C5]](s64) + ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C5]](s64) ; CHECK-NEXT: G_STORE [[AND4]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 32, align 32) ; CHECK-NEXT: RET_ReallyLR implicit $x0 %a:_(s319) = G_IMPLICIT_DEF @@ -133,10 +133,10 @@ body: | ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[AND2]](s64), 0 ; CHECK-NEXT: G_STORE [[COPY]](s64), %ptr(p0) :: (store (s64), align 32) ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C2]](s64) + ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C2]](s64) ; CHECK-NEXT: G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store (s64) into unknown-address + 8) ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 - ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD %ptr, [[C3]](s64) + ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw inbounds G_PTR_ADD %ptr, [[C3]](s64) ; CHECK-NEXT: G_STORE [[EXTRACT]](s32), [[PTR_ADD1]](p0) :: (store (s32) into unknown-address + 16, align 16) ; CHECK-NEXT: RET_ReallyLR implicit $x0 %a:_(s158) = G_IMPLICIT_DEF diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir index 09e5a15..a422f60 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir @@ -667,11 +667,10 @@ body: | ; SELECT-NEXT: {{ $}} ; SELECT-NEXT: %zero:gpr64 = COPY 
$xzr
 ; SELECT-NEXT: %reg0:gpr64 = COPY $x0
- ; SELECT-NEXT: %shl:gpr64 = UBFMXri %reg0, 1, 0
+ ; SELECT-NEXT: %cmp_lhs:gpr64 = SUBSXrs %zero, %reg0, 63, implicit-def dead $nzcv
 ; SELECT-NEXT: %reg1:gpr64 = COPY $x1
 ; SELECT-NEXT: %sext_in_reg:gpr64 = SBFMXri %reg1, 0, 0
- ; SELECT-NEXT: %cmp_rhs:gpr64 = SUBSXrs %zero, %sext_in_reg, 131, implicit-def dead $nzcv
- ; SELECT-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr %shl, %cmp_rhs, implicit-def $nzcv
+ ; SELECT-NEXT: [[ADDSXrs:%[0-9]+]]:gpr64 = ADDSXrs %cmp_lhs, %sext_in_reg, 131, implicit-def $nzcv
 ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
 ; SELECT-NEXT: $w0 = COPY %cmp
 ; SELECT-NEXT: RET_ReallyLR implicit $w0
diff --git a/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll b/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll
index 578038b..d9cdac4 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll
@@ -1,8 +1,8 @@
 ; RUN: llc -O3 -aarch64-enable-gep-opt=true -verify-machineinstrs %s -o - | FileCheck %s
-; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-UseAA %s
-; RUN: llc -O3 -aarch64-enable-gep-opt=true -aarch64-use-aa=false -print-after=codegenprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-NoAA %s
-; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cyclone < %s 2>&1 | FileCheck --check-prefix=CHECK-UseAA %s
-; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s 2>&1 | FileCheck --check-prefix=CHECK-UseAA %s
+; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-IR %s
+; RUN: llc -O3 -aarch64-enable-gep-opt=true -aarch64-use-aa=false -print-after=codegenprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-IR %s
+; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cyclone < %s 2>&1 | FileCheck --check-prefix=CHECK-IR %s
+; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s 2>&1 | FileCheck --check-prefix=CHECK-IR %s

 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64"
@@ -38,24 +38,12 @@ if.end: ; preds = %if.then, %entry
 ; CHECK-NOT: madd
 ; CHECK:ldr

-; CHECK-NoAA-LABEL: @test_GEP_CSE(
-; CHECK-NoAA: [[PTR0:%[a-zA-Z0-9]+]] = ptrtoint ptr %string to i64
-; CHECK-NoAA: [[PTR1:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
-; CHECK-NoAA: [[PTR2:%[a-zA-Z0-9]+]] = add i64 [[PTR0]], [[PTR1]]
-; CHECK-NoAA: add i64 [[PTR2]], 23052
-; CHECK-NoAA: inttoptr
-; CHECK-NoAA: if.then:
-; CHECK-NoAA-NOT: ptrtoint
-; CHECK-NoAA-NOT: mul
-; CHECK-NoAA: add i64 [[PTR2]], 23048
-; CHECK-NoAA: inttoptr
-
-; CHECK-UseAA-LABEL: @test_GEP_CSE(
-; CHECK-UseAA: [[IDX:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
-; CHECK-UseAA: [[PTR1:%[a-zA-Z0-9]+]] = getelementptr i8, ptr %string, i64 [[IDX]]
-; CHECK-UseAA: getelementptr i8, ptr [[PTR1]], i64 23052
-; CHECK-UseAA: if.then:
-; CHECK-UseAA: getelementptr i8, ptr [[PTR1]], i64 23048
+; CHECK-IR-LABEL: @test_GEP_CSE(
+; CHECK-IR: [[IDX:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
+; CHECK-IR: [[PTR1:%[a-zA-Z0-9]+]] = getelementptr i8, ptr %string, i64 [[IDX]]
+; CHECK-IR: getelementptr i8, ptr [[PTR1]], i64 23052
+; CHECK-IR: if.then:
+; CHECK-IR: getelementptr i8, ptr [[PTR1]], i64 23048

 %class.my = type { i32, [128 x i32], i32, [256 x %struct.pt]}
 %struct.pt = type { ptr, i32, i32 }
diff --git a/llvm/test/CodeGen/AArch64/aarch64-isel-csinc-type.ll b/llvm/test/CodeGen/AArch64/aarch64-isel-csinc-type.ll
index 7706ca9..9fab3d1 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-isel-csinc-type.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-isel-csinc-type.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-- -o - < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-- -o - < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-- -global-isel -o - < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI

 ; Verify that we can fold csneg/csel into csinc instruction.
@@ -8,12 +9,20 @@ target triple = "aarch64-unknown-linux-gnu"

 ; char csinc1 (char a, char b) { return !a ? b+1 : b+3; }
 define i8 @csinc1(i8 %a, i8 %b) local_unnamed_addr #0 {
-; CHECK-LABEL: csinc1:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: tst w0, #0xff
-; CHECK-NEXT: add w8, w1, #3
-; CHECK-NEXT: csinc w0, w8, w1, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: csinc1:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: tst w0, #0xff
+; CHECK-SD-NEXT: add w8, w1, #3
+; CHECK-SD-NEXT: csinc w0, w8, w1, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: csinc1:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, #3 // =0x3
+; CHECK-GI-NEXT: tst w0, #0xff
+; CHECK-GI-NEXT: csinc w8, w8, wzr, ne
+; CHECK-GI-NEXT: add w0, w8, w1
+; CHECK-GI-NEXT: ret
 entry:
 %tobool.not = icmp eq i8 %a, 0
 %cond.v = select i1 %tobool.not, i8 1, i8 3
@@ -23,12 +32,20 @@ entry:

 ; short csinc2 (short a, short b) { return !a ? b+1 : b+3; }
 define i16 @csinc2(i16 %a, i16 %b) local_unnamed_addr #0 {
-; CHECK-LABEL: csinc2:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: tst w0, #0xffff
-; CHECK-NEXT: add w8, w1, #3
-; CHECK-NEXT: csinc w0, w8, w1, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: csinc2:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: tst w0, #0xffff
+; CHECK-SD-NEXT: add w8, w1, #3
+; CHECK-SD-NEXT: csinc w0, w8, w1, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: csinc2:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, #3 // =0x3
+; CHECK-GI-NEXT: tst w0, #0xffff
+; CHECK-GI-NEXT: csinc w8, w8, wzr, ne
+; CHECK-GI-NEXT: add w0, w8, w1
+; CHECK-GI-NEXT: ret
 entry:
 %tobool.not = icmp eq i16 %a, 0
 %cond.v = select i1 %tobool.not, i16 1, i16 3
@@ -38,12 +55,20 @@ entry:

 ; int csinc3 (int a, int b) { return !a ? b+1 : b+3; }
 define i32 @csinc3(i32 %a, i32 %b) local_unnamed_addr #0 {
-; CHECK-LABEL: csinc3:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmp w0, #0
-; CHECK-NEXT: add w8, w1, #3
-; CHECK-NEXT: csinc w0, w8, w1, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: csinc3:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, #0
+; CHECK-SD-NEXT: add w8, w1, #3
+; CHECK-SD-NEXT: csinc w0, w8, w1, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: csinc3:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, #3 // =0x3
+; CHECK-GI-NEXT: cmp w0, #0
+; CHECK-GI-NEXT: csinc w8, w8, wzr, ne
+; CHECK-GI-NEXT: add w0, w8, w1
+; CHECK-GI-NEXT: ret
 entry:
 %tobool.not = icmp eq i32 %a, 0
 %cond.v = select i1 %tobool.not, i32 1, i32 3
@@ -53,12 +78,20 @@ entry:

 ; long long csinc4 (long long a, long long b) { return !a ?
b+1 : b+3; } define i64 @csinc4(i64 %a, i64 %b) local_unnamed_addr #0 { -; CHECK-LABEL: csinc4: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmp x0, #0 -; CHECK-NEXT: add x8, x1, #3 -; CHECK-NEXT: csinc x0, x8, x1, ne -; CHECK-NEXT: ret +; CHECK-SD-LABEL: csinc4: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: cmp x0, #0 +; CHECK-SD-NEXT: add x8, x1, #3 +; CHECK-SD-NEXT: csinc x0, x8, x1, ne +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: csinc4: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: mov w8, #3 // =0x3 +; CHECK-GI-NEXT: cmp x0, #0 +; CHECK-GI-NEXT: csinc x8, x8, xzr, ne +; CHECK-GI-NEXT: add x0, x8, x1 +; CHECK-GI-NEXT: ret entry: %tobool.not = icmp eq i64 %a, 0 %cond.v = select i1 %tobool.not, i64 1, i64 3 @@ -68,12 +101,21 @@ entry: ; long long csinc8 (long long a, long long b) { return a ? b-1 : b+1; } define i64 @csinc8(i64 %a, i64 %b) { -; CHECK-LABEL: csinc8: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: sub x8, x1, #1 -; CHECK-NEXT: cmp x0, #0 -; CHECK-NEXT: csinc x0, x8, x1, ne -; CHECK-NEXT: ret +; CHECK-SD-LABEL: csinc8: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: sub x8, x1, #1 +; CHECK-SD-NEXT: cmp x0, #0 +; CHECK-SD-NEXT: csinc x0, x8, x1, ne +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: csinc8: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: cmp x0, #0 +; CHECK-GI-NEXT: cset w8, ne +; CHECK-GI-NEXT: sbfx x8, x8, #0, #1 +; CHECK-GI-NEXT: orr x8, x8, #0x1 +; CHECK-GI-NEXT: add x0, x8, x1 +; CHECK-GI-NEXT: ret entry: %tobool.not = icmp eq i64 %a, 0 %cond.v = select i1 %tobool.not, i64 1, i64 -1 @@ -83,15 +125,26 @@ entry: ; long long csinc9 (long long a, long long b) { return a ? b+1 : b-1; } define i64 @csinc9(i64 %a, i64 %b) { -; CHECK-LABEL: csinc9: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: sub x8, x1, #1 -; CHECK-NEXT: cmp x0, #0 -; CHECK-NEXT: csinc x0, x8, x1, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: csinc9: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: sub x8, x1, #1 +; CHECK-SD-NEXT: cmp x0, #0 +; CHECK-SD-NEXT: csinc x0, x8, x1, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: csinc9: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: cmp x0, #0 +; CHECK-GI-NEXT: cset w8, eq +; CHECK-GI-NEXT: sbfx x8, x8, #0, #1 +; CHECK-GI-NEXT: orr x8, x8, #0x1 +; CHECK-GI-NEXT: add x0, x8, x1 +; CHECK-GI-NEXT: ret entry: %tobool.not = icmp eq i64 %a, 0 %cond.v = select i1 %tobool.not, i64 -1, i64 1 %cond = add nsw i64 %cond.v, %b ret i64 %cond } +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll b/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll index f7e16b8..9947fba 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll @@ -38,14 +38,12 @@ define <16 x i32> @mul_i32(<16 x i8> %a, <16 x i8> %b) { ; ; CHECK-GI-LABEL: mul_i32: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ushll v2.8h, v0.8b, #0 -; CHECK-GI-NEXT: ushll v3.8h, v1.8b, #0 -; CHECK-GI-NEXT: ushll2 v4.8h, v0.16b, #0 -; CHECK-GI-NEXT: ushll2 v5.8h, v1.16b, #0 -; CHECK-GI-NEXT: umull v0.4s, v2.4h, v3.4h -; CHECK-GI-NEXT: umull2 v1.4s, v2.8h, v3.8h -; CHECK-GI-NEXT: umull v2.4s, v4.4h, v5.4h -; CHECK-GI-NEXT: umull2 v3.4s, v4.8h, v5.8h +; CHECK-GI-NEXT: umull v2.8h, v0.8b, v1.8b +; CHECK-GI-NEXT: umull2 v3.8h, v0.16b, v1.16b +; CHECK-GI-NEXT: ushll v0.4s, v2.4h, #0 +; CHECK-GI-NEXT: ushll2 v1.4s, v2.8h, #0 +; CHECK-GI-NEXT: ushll v2.4s, v3.4h, #0 +; CHECK-GI-NEXT: ushll2 v3.4s, v3.8h, #0 ; CHECK-GI-NEXT: ret entry: %ea = zext <16 x i8> %a to <16 x i32> @@ -75,26 +73,20 @@ define <16 x i64> @mul_i64(<16 x i8> %a, <16 x i8> %b) { ; ; CHECK-GI-LABEL: mul_i64: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ushll v2.8h, v0.8b, #0 -; CHECK-GI-NEXT: ushll v3.8h, v1.8b, #0 -; CHECK-GI-NEXT: ushll2 v0.8h, v0.16b, #0 -; CHECK-GI-NEXT: ushll2 v1.8h, v1.16b, #0 -; CHECK-GI-NEXT: ushll v4.4s, v2.4h, #0 -; CHECK-GI-NEXT: ushll2 v5.4s, v2.8h, #0 -; CHECK-GI-NEXT: ushll v2.4s, v3.4h, #0 -; CHECK-GI-NEXT: ushll v6.4s, v0.4h, #0 -; CHECK-GI-NEXT: ushll2 v3.4s, v3.8h, #0 -; CHECK-GI-NEXT: ushll v7.4s, v1.4h, #0 -; CHECK-GI-NEXT: ushll2 v16.4s, v0.8h, #0 -; CHECK-GI-NEXT: ushll2 v17.4s, v1.8h, #0 -; CHECK-GI-NEXT: umull v0.2d, v4.2s, v2.2s -; CHECK-GI-NEXT: umull2 v1.2d, v4.4s, v2.4s -; CHECK-GI-NEXT: umull v2.2d, v5.2s, v3.2s -; CHECK-GI-NEXT: umull2 v3.2d, v5.4s, v3.4s -; CHECK-GI-NEXT: umull v4.2d, v6.2s, v7.2s -; CHECK-GI-NEXT: umull2 v5.2d, v6.4s, v7.4s -; CHECK-GI-NEXT: umull v6.2d, v16.2s, v17.2s -; CHECK-GI-NEXT: umull2 v7.2d, v16.4s, v17.4s +; CHECK-GI-NEXT: umull v2.8h, v0.8b, v1.8b +; CHECK-GI-NEXT: umull2 v0.8h, v0.16b, v1.16b +; CHECK-GI-NEXT: ushll v1.4s, v2.4h, #0 +; CHECK-GI-NEXT: ushll2 v3.4s, v2.8h, #0 +; CHECK-GI-NEXT: ushll v5.4s, v0.4h, #0 +; CHECK-GI-NEXT: ushll2 v7.4s, v0.8h, #0 +; CHECK-GI-NEXT: ushll v0.2d, v1.2s, #0 +; CHECK-GI-NEXT: ushll2 v1.2d, v1.4s, #0 +; CHECK-GI-NEXT: ushll v2.2d, v3.2s, #0 +; CHECK-GI-NEXT: ushll2 v3.2d, v3.4s, #0 +; CHECK-GI-NEXT: ushll v4.2d, v5.2s, #0 +; CHECK-GI-NEXT: ushll2 v5.2d, v5.4s, #0 +; CHECK-GI-NEXT: ushll v6.2d, v7.2s, #0 +; CHECK-GI-NEXT: ushll2 v7.2d, v7.4s, #0 ; CHECK-GI-NEXT: ret entry: %ea = zext <16 x i8> %a to <16 x i64> @@ -142,18 +134,12 @@ define <16 x i32> @mla_i32(<16 x i8> %a, <16 x i8> %b, <16 x i32> %c) { ; ; CHECK-GI-LABEL: mla_i32: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ushll v6.8h, v0.8b, #0 -; CHECK-GI-NEXT: ushll v7.8h, v1.8b, #0 -; CHECK-GI-NEXT: ushll2 v0.8h, v0.16b, #0 -; CHECK-GI-NEXT: ushll2 v1.8h, v1.16b, #0 -; CHECK-GI-NEXT: umlal v2.4s, v6.4h, v7.4h -; CHECK-GI-NEXT: umlal2 v3.4s, v6.8h, v7.8h -; CHECK-GI-NEXT: umlal v4.4s, v0.4h, v1.4h -; CHECK-GI-NEXT: umlal2 v5.4s, v0.8h, v1.8h -; CHECK-GI-NEXT: mov v0.16b, v2.16b -; CHECK-GI-NEXT: mov v1.16b, v3.16b -; CHECK-GI-NEXT: mov v2.16b, v4.16b -; CHECK-GI-NEXT: mov v3.16b, v5.16b +; CHECK-GI-NEXT: umull v6.8h, v0.8b, v1.8b +; CHECK-GI-NEXT: umull2 v7.8h, v0.16b, v1.16b +; CHECK-GI-NEXT: uaddw v0.4s, v2.4s, v6.4h +; 
CHECK-GI-NEXT: uaddw2 v1.4s, v3.4s, v6.8h +; CHECK-GI-NEXT: uaddw v2.4s, v4.4s, v7.4h +; CHECK-GI-NEXT: uaddw2 v3.4s, v5.4s, v7.8h ; CHECK-GI-NEXT: ret entry: %ea = zext <16 x i8> %a to <16 x i32> @@ -186,35 +172,21 @@ define <16 x i64> @mla_i64(<16 x i8> %a, <16 x i8> %b, <16 x i64> %c) { ; ; CHECK-GI-LABEL: mla_i64: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: mov v16.16b, v2.16b -; CHECK-GI-NEXT: mov v17.16b, v3.16b -; CHECK-GI-NEXT: mov v2.16b, v4.16b -; CHECK-GI-NEXT: mov v3.16b, v5.16b -; CHECK-GI-NEXT: mov v4.16b, v6.16b -; CHECK-GI-NEXT: mov v5.16b, v7.16b -; CHECK-GI-NEXT: ushll v6.8h, v0.8b, #0 -; CHECK-GI-NEXT: ushll v7.8h, v1.8b, #0 -; CHECK-GI-NEXT: ushll2 v0.8h, v0.16b, #0 -; CHECK-GI-NEXT: ushll2 v1.8h, v1.16b, #0 -; CHECK-GI-NEXT: ushll v18.4s, v6.4h, #0 -; CHECK-GI-NEXT: ushll v20.4s, v7.4h, #0 -; CHECK-GI-NEXT: ushll2 v19.4s, v6.8h, #0 -; CHECK-GI-NEXT: ushll v21.4s, v0.4h, #0 -; CHECK-GI-NEXT: ushll2 v22.4s, v7.8h, #0 -; CHECK-GI-NEXT: ushll v23.4s, v1.4h, #0 -; CHECK-GI-NEXT: ldp q6, q7, [sp] -; CHECK-GI-NEXT: ushll2 v0.4s, v0.8h, #0 -; CHECK-GI-NEXT: ushll2 v1.4s, v1.8h, #0 -; CHECK-GI-NEXT: umlal v16.2d, v18.2s, v20.2s -; CHECK-GI-NEXT: umlal2 v17.2d, v18.4s, v20.4s -; CHECK-GI-NEXT: umlal v2.2d, v19.2s, v22.2s -; CHECK-GI-NEXT: umlal2 v3.2d, v19.4s, v22.4s -; CHECK-GI-NEXT: umlal v4.2d, v21.2s, v23.2s -; CHECK-GI-NEXT: umlal2 v5.2d, v21.4s, v23.4s -; CHECK-GI-NEXT: umlal v6.2d, v0.2s, v1.2s -; CHECK-GI-NEXT: umlal2 v7.2d, v0.4s, v1.4s -; CHECK-GI-NEXT: mov v0.16b, v16.16b -; CHECK-GI-NEXT: mov v1.16b, v17.16b +; CHECK-GI-NEXT: umull v16.8h, v0.8b, v1.8b +; CHECK-GI-NEXT: umull2 v0.8h, v0.16b, v1.16b +; CHECK-GI-NEXT: ldp q19, q20, [sp] +; CHECK-GI-NEXT: ushll v1.4s, v16.4h, #0 +; CHECK-GI-NEXT: ushll2 v16.4s, v16.8h, #0 +; CHECK-GI-NEXT: ushll v17.4s, v0.4h, #0 +; CHECK-GI-NEXT: ushll2 v18.4s, v0.8h, #0 +; CHECK-GI-NEXT: uaddw v0.2d, v2.2d, v1.2s +; CHECK-GI-NEXT: uaddw2 v1.2d, v3.2d, v1.4s +; CHECK-GI-NEXT: uaddw v2.2d, v4.2d, v16.2s +; CHECK-GI-NEXT: uaddw2 v3.2d, v5.2d, v16.4s +; CHECK-GI-NEXT: uaddw v4.2d, v6.2d, v17.2s +; CHECK-GI-NEXT: uaddw2 v5.2d, v7.2d, v17.4s +; CHECK-GI-NEXT: uaddw v6.2d, v19.2d, v18.2s +; CHECK-GI-NEXT: uaddw2 v7.2d, v20.2d, v18.4s ; CHECK-GI-NEXT: ret entry: %ea = zext <16 x i8> %a to <16 x i64> diff --git a/llvm/test/CodeGen/AArch64/abds-neg.ll b/llvm/test/CodeGen/AArch64/abds-neg.ll index 432ffc3..7524782 100644 --- a/llvm/test/CodeGen/AArch64/abds-neg.ll +++ b/llvm/test/CodeGen/AArch64/abds-neg.ll @@ -77,10 +77,8 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_i32: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w9, w1, w8 -; CHECK-NEXT: subs w8, w8, w1 -; CHECK-NEXT: csel w8, w8, w9, gt -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: subs w8, w1, w8 +; CHECK-NEXT: cneg w0, w8, ge ; CHECK-NEXT: ret %aext = sext i16 %a to i64 %bext = sext i32 %b to i64 @@ -111,10 +109,8 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: subs w9, w0, w1 -; CHECK-NEXT: csel w8, w9, w8, gt -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: subs w8, w1, w0 +; CHECK-NEXT: cneg w0, w8, ge ; CHECK-NEXT: ret %aext = sext i32 %a to i64 %bext = sext i32 %b to i64 @@ -129,10 +125,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i32_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w1 -; CHECK-NEXT: sub w9, w8, w0 -; CHECK-NEXT: subs w8, w0, w8 
-; CHECK-NEXT: csel w8, w8, w9, gt -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: subs w8, w8, w0 +; CHECK-NEXT: cneg w0, w8, ge ; CHECK-NEXT: ret %aext = sext i32 %a to i64 %bext = sext i16 %b to i64 @@ -146,10 +140,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i32_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: subs w9, w0, w1 -; CHECK-NEXT: csel w8, w9, w8, gt -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: subs w8, w1, w0 +; CHECK-NEXT: cneg w0, w8, ge ; CHECK-NEXT: ret %aext = sext i32 %a to i64 %bext = sext i32 %b to i64 @@ -163,10 +155,8 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_ext_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x1, x0 -; CHECK-NEXT: subs x9, x0, x1 -; CHECK-NEXT: csel x8, x9, x8, gt -; CHECK-NEXT: neg x0, x8 +; CHECK-NEXT: subs x8, x1, x0 +; CHECK-NEXT: cneg x0, x8, ge ; CHECK-NEXT: ret %aext = sext i64 %a to i128 %bext = sext i64 %b to i128 @@ -180,10 +170,8 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_ext_i64_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x1, x0 -; CHECK-NEXT: subs x9, x0, x1 -; CHECK-NEXT: csel x8, x9, x8, gt -; CHECK-NEXT: neg x0, x8 +; CHECK-NEXT: subs x8, x1, x0 +; CHECK-NEXT: cneg x0, x8, ge ; CHECK-NEXT: ret %aext = sext i64 %a to i128 %bext = sext i64 %b to i128 @@ -359,9 +347,8 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind { define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_cmp_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: subs w9, w0, w1 -; CHECK-NEXT: csel w0, w8, w9, ge +; CHECK-NEXT: subs w8, w0, w1 +; CHECK-NEXT: cneg w0, w8, ge ; CHECK-NEXT: ret %cmp = icmp sge i32 %a, %b %ab = sub i32 %a, %b @@ -373,9 +360,8 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind { define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_cmp_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x1, x0 -; CHECK-NEXT: subs x9, x0, x1 -; CHECK-NEXT: csel x0, x9, x8, lt +; CHECK-NEXT: subs x8, x0, x1 +; CHECK-NEXT: cneg x0, x8, ge ; CHECK-NEXT: ret %cmp = icmp slt i64 %a, %b %ab = sub i64 %a, %b diff --git a/llvm/test/CodeGen/AArch64/abds.ll b/llvm/test/CodeGen/AArch64/abds.ll index ed1e607..bbdb116 100644 --- a/llvm/test/CodeGen/AArch64/abds.ll +++ b/llvm/test/CodeGen/AArch64/abds.ll @@ -73,9 +73,8 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_i32: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: sub w9, w1, w8 ; CHECK-NEXT: subs w8, w8, w1 -; CHECK-NEXT: csel w0, w8, w9, gt +; CHECK-NEXT: cneg w0, w8, le ; CHECK-NEXT: ret %aext = sext i16 %a to i64 %bext = sext i32 %b to i64 @@ -104,9 +103,8 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: subs w9, w0, w1 -; CHECK-NEXT: csel w0, w9, w8, gt +; CHECK-NEXT: subs w8, w0, w1 +; CHECK-NEXT: cneg w0, w8, le ; CHECK-NEXT: ret %aext = sext i32 %a to i64 %bext = sext i32 %b to i64 @@ -120,9 +118,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i32_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: sxth w8, w1 -; CHECK-NEXT: sub w9, w8, w0 ; CHECK-NEXT: subs w8, w0, w8 -; CHECK-NEXT: csel w0, w8, w9, gt +; CHECK-NEXT: cneg w0, w8, le ; CHECK-NEXT: ret %aext = sext 
i32 %a to i64 %bext = sext i16 %b to i64 @@ -135,9 +132,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i32_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: subs w9, w0, w1 -; CHECK-NEXT: csel w0, w9, w8, gt +; CHECK-NEXT: subs w8, w0, w1 +; CHECK-NEXT: cneg w0, w8, le ; CHECK-NEXT: ret %aext = sext i32 %a to i64 %bext = sext i32 %b to i64 @@ -150,9 +146,8 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_ext_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x1, x0 -; CHECK-NEXT: subs x9, x0, x1 -; CHECK-NEXT: csel x0, x9, x8, gt +; CHECK-NEXT: subs x8, x0, x1 +; CHECK-NEXT: cneg x0, x8, le ; CHECK-NEXT: ret %aext = sext i64 %a to i128 %bext = sext i64 %b to i128 @@ -165,9 +160,8 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_ext_i64_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x1, x0 -; CHECK-NEXT: subs x9, x0, x1 -; CHECK-NEXT: csel x0, x9, x8, gt +; CHECK-NEXT: subs x8, x0, x1 +; CHECK-NEXT: cneg x0, x8, le ; CHECK-NEXT: ret %aext = sext i64 %a to i128 %bext = sext i64 %b to i128 @@ -248,9 +242,8 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_minmax_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: subs w9, w0, w1 -; CHECK-NEXT: csel w0, w9, w8, gt +; CHECK-NEXT: subs w8, w0, w1 +; CHECK-NEXT: cneg w0, w8, le ; CHECK-NEXT: ret %min = call i32 @llvm.smin.i32(i32 %a, i32 %b) %max = call i32 @llvm.smax.i32(i32 %a, i32 %b) @@ -261,9 +254,8 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind { define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_minmax_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x1, x0 -; CHECK-NEXT: subs x9, x0, x1 -; CHECK-NEXT: csel x0, x9, x8, gt +; CHECK-NEXT: subs x8, x0, x1 +; CHECK-NEXT: cneg x0, x8, le ; CHECK-NEXT: ret %min = call i64 @llvm.smin.i64(i64 %a, i64 %b) %max = call i64 @llvm.smax.i64(i64 %a, i64 %b) @@ -324,9 +316,8 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind { define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_cmp_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: subs w9, w0, w1 -; CHECK-NEXT: csel w0, w9, w8, gt +; CHECK-NEXT: subs w8, w0, w1 +; CHECK-NEXT: cneg w0, w8, le ; CHECK-NEXT: ret %cmp = icmp slt i32 %a, %b %ab = sub i32 %a, %b @@ -338,9 +329,8 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind { define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_cmp_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x1, x0 -; CHECK-NEXT: subs x9, x0, x1 -; CHECK-NEXT: csel x0, x9, x8, gt +; CHECK-NEXT: subs x8, x0, x1 +; CHECK-NEXT: cneg x0, x8, le ; CHECK-NEXT: ret %cmp = icmp sge i64 %a, %b %ab = sub i64 %a, %b @@ -572,9 +562,8 @@ define i16 @abd_select_i16(i16 %a, i16 %b) nounwind { define i32 @abd_select_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_select_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: subs w9, w0, w1 -; CHECK-NEXT: csel w0, w9, w8, gt +; CHECK-NEXT: subs w8, w0, w1 +; CHECK-NEXT: cneg w0, w8, le ; CHECK-NEXT: ret %cmp = icmp sgt i32 %a, %b %ab = select i1 %cmp, i32 %a, i32 %b @@ -586,9 +575,8 @@ define i32 @abd_select_i32(i32 %a, i32 %b) nounwind { define i64 @abd_select_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_select_i64: ; CHECK: // %bb.0: 
-; CHECK-NEXT: sub x8, x1, x0 -; CHECK-NEXT: subs x9, x0, x1 -; CHECK-NEXT: csel x0, x9, x8, gt +; CHECK-NEXT: subs x8, x0, x1 +; CHECK-NEXT: cneg x0, x8, le ; CHECK-NEXT: ret %cmp = icmp sge i64 %a, %b %ab = select i1 %cmp, i64 %a, i64 %b diff --git a/llvm/test/CodeGen/AArch64/abdu-neg.ll b/llvm/test/CodeGen/AArch64/abdu-neg.ll index 8fb106e..d07f099a 100644 --- a/llvm/test/CodeGen/AArch64/abdu-neg.ll +++ b/llvm/test/CodeGen/AArch64/abdu-neg.ll @@ -77,10 +77,8 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_i32: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w9, w1, w8 -; CHECK-NEXT: subs w8, w8, w1 -; CHECK-NEXT: csel w8, w8, w9, hi -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: subs w8, w1, w8 +; CHECK-NEXT: cneg w0, w8, hs ; CHECK-NEXT: ret %aext = zext i16 %a to i64 %bext = zext i32 %b to i64 @@ -111,10 +109,8 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: subs w9, w0, w1 -; CHECK-NEXT: csel w8, w9, w8, hi -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: subs w8, w1, w0 +; CHECK-NEXT: cneg w0, w8, hs ; CHECK-NEXT: ret %aext = zext i32 %a to i64 %bext = zext i32 %b to i64 @@ -129,10 +125,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i32_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w1, #0xffff -; CHECK-NEXT: sub w9, w8, w0 -; CHECK-NEXT: subs w8, w0, w8 -; CHECK-NEXT: csel w8, w8, w9, hi -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: subs w8, w8, w0 +; CHECK-NEXT: cneg w0, w8, hs ; CHECK-NEXT: ret %aext = zext i32 %a to i64 %bext = zext i16 %b to i64 @@ -146,10 +140,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i32_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: subs w9, w0, w1 -; CHECK-NEXT: csel w8, w9, w8, hi -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: subs w8, w1, w0 +; CHECK-NEXT: cneg w0, w8, hs ; CHECK-NEXT: ret %aext = zext i32 %a to i64 %bext = zext i32 %b to i64 @@ -163,10 +155,8 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_ext_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x1, x0 -; CHECK-NEXT: subs x9, x0, x1 -; CHECK-NEXT: csel x8, x9, x8, hi -; CHECK-NEXT: neg x0, x8 +; CHECK-NEXT: subs x8, x1, x0 +; CHECK-NEXT: cneg x0, x8, hs ; CHECK-NEXT: ret %aext = zext i64 %a to i128 %bext = zext i64 %b to i128 @@ -180,10 +170,8 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_ext_i64_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x1, x0 -; CHECK-NEXT: subs x9, x0, x1 -; CHECK-NEXT: csel x8, x9, x8, hi -; CHECK-NEXT: neg x0, x8 +; CHECK-NEXT: subs x8, x1, x0 +; CHECK-NEXT: cneg x0, x8, hs ; CHECK-NEXT: ret %aext = zext i64 %a to i128 %bext = zext i64 %b to i128 @@ -363,9 +351,8 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind { define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_cmp_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: subs w9, w0, w1 -; CHECK-NEXT: csel w0, w8, w9, hs +; CHECK-NEXT: subs w8, w0, w1 +; CHECK-NEXT: cneg w0, w8, hs ; CHECK-NEXT: ret %cmp = icmp uge i32 %a, %b %ab = sub i32 %a, %b @@ -377,9 +364,8 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind { define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind { ; 
CHECK-LABEL: abd_cmp_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x1, x0 -; CHECK-NEXT: subs x9, x0, x1 -; CHECK-NEXT: csel x0, x9, x8, lo +; CHECK-NEXT: subs x8, x0, x1 +; CHECK-NEXT: cneg x0, x8, hs ; CHECK-NEXT: ret %cmp = icmp ult i64 %a, %b %ab = sub i64 %a, %b diff --git a/llvm/test/CodeGen/AArch64/abdu.ll b/llvm/test/CodeGen/AArch64/abdu.ll index 4585de9..1045ee2 100644 --- a/llvm/test/CodeGen/AArch64/abdu.ll +++ b/llvm/test/CodeGen/AArch64/abdu.ll @@ -73,9 +73,8 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_i32: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: sub w9, w1, w8 ; CHECK-NEXT: subs w8, w8, w1 -; CHECK-NEXT: csel w0, w8, w9, hi +; CHECK-NEXT: cneg w0, w8, ls ; CHECK-NEXT: ret %aext = zext i16 %a to i64 %bext = zext i32 %b to i64 @@ -104,9 +103,8 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: subs w9, w0, w1 -; CHECK-NEXT: csel w0, w9, w8, hi +; CHECK-NEXT: subs w8, w0, w1 +; CHECK-NEXT: cneg w0, w8, ls ; CHECK-NEXT: ret %aext = zext i32 %a to i64 %bext = zext i32 %b to i64 @@ -120,9 +118,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i32_i16: ; CHECK: // %bb.0: ; CHECK-NEXT: and w8, w1, #0xffff -; CHECK-NEXT: sub w9, w8, w0 ; CHECK-NEXT: subs w8, w0, w8 -; CHECK-NEXT: csel w0, w8, w9, hi +; CHECK-NEXT: cneg w0, w8, ls ; CHECK-NEXT: ret %aext = zext i32 %a to i64 %bext = zext i16 %b to i64 @@ -135,9 +132,8 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i32_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: subs w9, w0, w1 -; CHECK-NEXT: csel w0, w9, w8, hi +; CHECK-NEXT: subs w8, w0, w1 +; CHECK-NEXT: cneg w0, w8, ls ; CHECK-NEXT: ret %aext = zext i32 %a to i64 %bext = zext i32 %b to i64 @@ -150,9 +146,8 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_ext_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x1, x0 -; CHECK-NEXT: subs x9, x0, x1 -; CHECK-NEXT: csel x0, x9, x8, hi +; CHECK-NEXT: subs x8, x0, x1 +; CHECK-NEXT: cneg x0, x8, ls ; CHECK-NEXT: ret %aext = zext i64 %a to i128 %bext = zext i64 %b to i128 @@ -165,9 +160,8 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_ext_i64_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x1, x0 -; CHECK-NEXT: subs x9, x0, x1 -; CHECK-NEXT: csel x0, x9, x8, hi +; CHECK-NEXT: subs x8, x0, x1 +; CHECK-NEXT: cneg x0, x8, ls ; CHECK-NEXT: ret %aext = zext i64 %a to i128 %bext = zext i64 %b to i128 @@ -252,9 +246,8 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_minmax_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: subs w9, w0, w1 -; CHECK-NEXT: csel w0, w9, w8, hi +; CHECK-NEXT: subs w8, w0, w1 +; CHECK-NEXT: cneg w0, w8, ls ; CHECK-NEXT: ret %min = call i32 @llvm.umin.i32(i32 %a, i32 %b) %max = call i32 @llvm.umax.i32(i32 %a, i32 %b) @@ -265,9 +258,8 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind { define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_minmax_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x1, x0 -; CHECK-NEXT: subs x9, x0, x1 -; CHECK-NEXT: csel x0, x9, x8, hi +; CHECK-NEXT: 
subs x8, x0, x1 +; CHECK-NEXT: cneg x0, x8, ls ; CHECK-NEXT: ret %min = call i64 @llvm.umin.i64(i64 %a, i64 %b) %max = call i64 @llvm.umax.i64(i64 %a, i64 %b) @@ -330,9 +322,8 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind { define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_cmp_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: subs w9, w0, w1 -; CHECK-NEXT: csel w0, w9, w8, hi +; CHECK-NEXT: subs w8, w0, w1 +; CHECK-NEXT: cneg w0, w8, ls ; CHECK-NEXT: ret %cmp = icmp ult i32 %a, %b %ab = sub i32 %a, %b @@ -344,9 +335,8 @@ define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind { define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_cmp_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x1, x0 -; CHECK-NEXT: subs x9, x0, x1 -; CHECK-NEXT: csel x0, x9, x8, hi +; CHECK-NEXT: subs x8, x0, x1 +; CHECK-NEXT: cneg x0, x8, ls ; CHECK-NEXT: ret %cmp = icmp uge i64 %a, %b %ab = sub i64 %a, %b @@ -437,9 +427,8 @@ define i16 @abd_select_i16(i16 %a, i16 %b) nounwind { define i32 @abd_select_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_select_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: subs w9, w0, w1 -; CHECK-NEXT: csel w0, w9, w8, hi +; CHECK-NEXT: subs w8, w0, w1 +; CHECK-NEXT: cneg w0, w8, ls ; CHECK-NEXT: ret %cmp = icmp ugt i32 %a, %b %ab = select i1 %cmp, i32 %a, i32 %b @@ -451,9 +440,8 @@ define i32 @abd_select_i32(i32 %a, i32 %b) nounwind { define i64 @abd_select_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_select_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x1, x0 -; CHECK-NEXT: subs x9, x0, x1 -; CHECK-NEXT: csel x0, x9, x8, hi +; CHECK-NEXT: subs x8, x0, x1 +; CHECK-NEXT: cneg x0, x8, ls ; CHECK-NEXT: ret %cmp = icmp uge i64 %a, %b %ab = select i1 %cmp, i64 %a, i64 %b diff --git a/llvm/test/CodeGen/AArch64/adc.ll b/llvm/test/CodeGen/AArch64/adc.ll index 4b1393f..12e8bf2 100644 --- a/llvm/test/CodeGen/AArch64/adc.ll +++ b/llvm/test/CodeGen/AArch64/adc.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 | FileCheck --check-prefix=CHECK-LE %s -; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64_be-none-linux-gnu | FileCheck --check-prefix=CHECK-BE %s +; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 | FileCheck --check-prefixes=CHECK-LE %s +; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64_be-none-linux-gnu | FileCheck --check-prefixes=CHECK-BE %s +; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 -global-isel | FileCheck --check-prefixes=CHECK-GI %s define i128 @test_simple(i128 %a, i128 %b, i128 %c) { ; CHECK-LE-LABEL: test_simple: @@ -18,11 +19,16 @@ define i128 @test_simple(i128 %a, i128 %b, i128 %c) { ; CHECK-BE-NEXT: subs x1, x8, x5 ; CHECK-BE-NEXT: sbc x0, x9, x4 ; CHECK-BE-NEXT: ret - +; +; CHECK-GI-LABEL: test_simple: +; CHECK-GI: ; %bb.0: +; CHECK-GI-NEXT: adds x8, x0, x2 +; CHECK-GI-NEXT: adc x9, x1, x3 +; CHECK-GI-NEXT: subs x0, x8, x4 +; CHECK-GI-NEXT: sbc x1, x9, x5 +; CHECK-GI-NEXT: ret %valadd = add i128 %a, %b - %valsub = sub i128 %valadd, %c - ret i128 %valsub } @@ -38,9 +44,13 @@ define i128 @test_imm(i128 %a) { ; CHECK-BE-NEXT: adds x1, x1, #12 ; CHECK-BE-NEXT: cinc x0, x0, hs ; CHECK-BE-NEXT: ret - +; +; CHECK-GI-LABEL: test_imm: +; CHECK-GI: ; %bb.0: +; CHECK-GI-NEXT: adds x0, x0, #12 +; CHECK-GI-NEXT: adc x1, x1, xzr +; CHECK-GI-NEXT: ret %val = add i128 %a, 12 - ret i128 %val } @@ -58,11 +68,16 @@ define i128 @test_shifted(i128 %a, i128 
%b) { ; CHECK-BE-NEXT: adds x1, x1, x3, lsl #45 ; CHECK-BE-NEXT: adc x0, x0, x8 ; CHECK-BE-NEXT: ret - +; +; CHECK-GI-LABEL: test_shifted: +; CHECK-GI: ; %bb.0: +; CHECK-GI-NEXT: lsr x8, x2, #19 +; CHECK-GI-NEXT: adds x0, x0, x2, lsl #45 +; CHECK-GI-NEXT: orr x8, x8, x3, lsl #45 +; CHECK-GI-NEXT: adc x1, x1, x8 +; CHECK-GI-NEXT: ret %rhs = shl i128 %b, 45 - %val = add i128 %a, %rhs - ret i128 %val } @@ -86,11 +101,19 @@ define i128 @test_extended(i128 %a, i16 %b) { ; CHECK-BE-NEXT: extr x8, x9, x8, #61 ; CHECK-BE-NEXT: adc x0, x0, x8 ; CHECK-BE-NEXT: ret - +; +; CHECK-GI-LABEL: test_extended: +; CHECK-GI: ; %bb.0: +; CHECK-GI-NEXT: ; kill: def $w2 killed $w2 def $x2 +; CHECK-GI-NEXT: sxth x8, w2 +; CHECK-GI-NEXT: adds x0, x0, w2, sxth #3 +; CHECK-GI-NEXT: asr x9, x8, #63 +; CHECK-GI-NEXT: lsr x8, x8, #61 +; CHECK-GI-NEXT: orr x8, x8, x9, lsl #3 +; CHECK-GI-NEXT: adc x1, x1, x8 +; CHECK-GI-NEXT: ret %ext = sext i16 %b to i128 %rhs = shl i128 %ext, 3 - %val = add i128 %a, %rhs - ret i128 %val } diff --git a/llvm/test/CodeGen/AArch64/add-extract.ll b/llvm/test/CodeGen/AArch64/add-extract.ll index 67c9f74..923bf08 100644 --- a/llvm/test/CodeGen/AArch64/add-extract.ll +++ b/llvm/test/CodeGen/AArch64/add-extract.ll @@ -1,13 +1,21 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 -; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s +; RUN: llc -mtriple=aarch64-none-elf -mattr=+aes < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple=aarch64-none-elf -mattr=+aes -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI define i64 @add_i64_ext_load(<1 x i64> %A, ptr %B) nounwind { -; CHECK-LABEL: add_i64_ext_load: -; CHECK: // %bb.0: -; CHECK-NEXT: ldr d1, [x0] -; CHECK-NEXT: add d0, d0, d1 -; CHECK-NEXT: fmov x0, d0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: add_i64_ext_load: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ldr d1, [x0] +; CHECK-SD-NEXT: add d0, d0, d1 +; CHECK-SD-NEXT: fmov x0, d0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: add_i64_ext_load: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fmov x9, d0 +; CHECK-GI-NEXT: ldr x8, [x0] +; CHECK-GI-NEXT: add x0, x9, x8 +; CHECK-GI-NEXT: ret %a = extractelement <1 x i64> %A, i32 0 %b = load i64, ptr %B %c = add i64 %a, %b @@ -15,12 +23,19 @@ define i64 @add_i64_ext_load(<1 x i64> %A, ptr %B) nounwind { } define i64 @sub_i64_ext_load(<1 x i64> %A, ptr %B) nounwind { -; CHECK-LABEL: sub_i64_ext_load: -; CHECK: // %bb.0: -; CHECK-NEXT: ldr d1, [x0] -; CHECK-NEXT: sub d0, d0, d1 -; CHECK-NEXT: fmov x0, d0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sub_i64_ext_load: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ldr d1, [x0] +; CHECK-SD-NEXT: sub d0, d0, d1 +; CHECK-SD-NEXT: fmov x0, d0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sub_i64_ext_load: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fmov x9, d0 +; CHECK-GI-NEXT: ldr x8, [x0] +; CHECK-GI-NEXT: sub x0, x9, x8 +; CHECK-GI-NEXT: ret %a = extractelement <1 x i64> %A, i32 0 %b = load i64, ptr %B %c = sub i64 %a, %b @@ -28,12 +43,20 @@ define i64 @sub_i64_ext_load(<1 x i64> %A, ptr %B) nounwind { } define void @add_i64_ext_load_store(<1 x i64> %A, ptr %B) nounwind { -; CHECK-LABEL: add_i64_ext_load_store: -; CHECK: // %bb.0: -; CHECK-NEXT: ldr d1, [x0] -; CHECK-NEXT: add d0, d0, d1 -; CHECK-NEXT: str d0, [x0] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: add_i64_ext_load_store: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ldr d1, [x0] +; CHECK-SD-NEXT: add d0, d0, d1 +; CHECK-SD-NEXT: str d0, [x0] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: 
add_i64_ext_load_store: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fmov x9, d0 +; CHECK-GI-NEXT: ldr x8, [x0] +; CHECK-GI-NEXT: add x8, x9, x8 +; CHECK-GI-NEXT: str x8, [x0] +; CHECK-GI-NEXT: ret %a = extractelement <1 x i64> %A, i32 0 %b = load i64, ptr %B %c = add i64 %a, %b @@ -55,11 +78,18 @@ define i64 @add_v2i64_ext_load(<2 x i64> %A, ptr %B) nounwind { } define i64 @add_i64_ext_ext(<1 x i64> %A, <1 x i64> %B) nounwind { -; CHECK-LABEL: add_i64_ext_ext: -; CHECK: // %bb.0: -; CHECK-NEXT: add d0, d0, d1 -; CHECK-NEXT: fmov x0, d0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: add_i64_ext_ext: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: add d0, d0, d1 +; CHECK-SD-NEXT: fmov x0, d0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: add_i64_ext_ext: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: fmov x9, d1 +; CHECK-GI-NEXT: add x0, x8, x9 +; CHECK-GI-NEXT: ret %a = extractelement <1 x i64> %A, i32 0 %b = extractelement <1 x i64> %B, i32 0 %c = add i64 %a, %b @@ -67,13 +97,20 @@ define i64 @add_i64_ext_ext(<1 x i64> %A, <1 x i64> %B) nounwind { } define i32 @add_i32_ext_load(<1 x i32> %A, ptr %B) nounwind { -; CHECK-LABEL: add_i32_ext_load: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: ldr w8, [x0] -; CHECK-NEXT: add w0, w9, w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: add_i32_ext_load: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-NEXT: fmov w9, s0 +; CHECK-SD-NEXT: ldr w8, [x0] +; CHECK-SD-NEXT: add w0, w9, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: add_i32_ext_load: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fmov w9, s0 +; CHECK-GI-NEXT: ldr w8, [x0] +; CHECK-GI-NEXT: add w0, w9, w8 +; CHECK-GI-NEXT: ret %a = extractelement <1 x i32> %A, i32 0 %b = load i32, ptr %B %c = add i32 %a, %b @@ -81,13 +118,22 @@ define i32 @add_i32_ext_load(<1 x i32> %A, ptr %B) nounwind { } define i64 @add_i64_ext_ext_test1(<1 x i64> %A, <2 x i64> %B) nounwind { -; CHECK-LABEL: add_i64_ext_ext_test1: -; CHECK: // %bb.0: -; CHECK-NEXT: ext v2.16b, v1.16b, v1.16b, #8 -; CHECK-NEXT: add d0, d0, d1 -; CHECK-NEXT: add d0, d0, d2 -; CHECK-NEXT: fmov x0, d0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: add_i64_ext_ext_test1: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ext v2.16b, v1.16b, v1.16b, #8 +; CHECK-SD-NEXT: add d0, d0, d1 +; CHECK-SD-NEXT: add d0, d0, d2 +; CHECK-SD-NEXT: fmov x0, d0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: add_i64_ext_ext_test1: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov x8, v1.d[1] +; CHECK-GI-NEXT: fmov x9, d0 +; CHECK-GI-NEXT: fmov x10, d1 +; CHECK-GI-NEXT: add x9, x9, x10 +; CHECK-GI-NEXT: add x0, x9, x8 +; CHECK-GI-NEXT: ret %a = extractelement <1 x i64> %A, i32 0 %b = extractelement <2 x i64> %B, i32 0 %c = extractelement <2 x i64> %B, i32 1 @@ -97,13 +143,22 @@ define i64 @add_i64_ext_ext_test1(<1 x i64> %A, <2 x i64> %B) nounwind { } define i64 @sub_i64_ext_ext_test1(<1 x i64> %A, <2 x i64> %B) nounwind { -; CHECK-LABEL: sub_i64_ext_ext_test1: -; CHECK: // %bb.0: -; CHECK-NEXT: ext v2.16b, v1.16b, v1.16b, #8 -; CHECK-NEXT: sub d0, d0, d1 -; CHECK-NEXT: sub d0, d0, d2 -; CHECK-NEXT: fmov x0, d0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sub_i64_ext_ext_test1: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ext v2.16b, v1.16b, v1.16b, #8 +; CHECK-SD-NEXT: sub d0, d0, d1 +; CHECK-SD-NEXT: sub d0, d0, d2 +; CHECK-SD-NEXT: fmov x0, d0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sub_i64_ext_ext_test1: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov x8, v1.d[1] +; CHECK-GI-NEXT: fmov x9, d0 +; 
CHECK-GI-NEXT: fmov x10, d1 +; CHECK-GI-NEXT: sub x9, x9, x10 +; CHECK-GI-NEXT: sub x0, x9, x8 +; CHECK-GI-NEXT: ret %a = extractelement <1 x i64> %A, i32 0 %b = extractelement <2 x i64> %B, i32 0 %c = extractelement <2 x i64> %B, i32 1 diff --git a/llvm/test/CodeGen/AArch64/addcarry-crash.ll b/llvm/test/CodeGen/AArch64/addcarry-crash.ll index be75ab1..b4556c7 100644 --- a/llvm/test/CodeGen/AArch64/addcarry-crash.ll +++ b/llvm/test/CodeGen/AArch64/addcarry-crash.ll @@ -1,16 +1,29 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s | FileCheck %s +; RUN: llc < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc < %s -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI + target triple = "arm64-apple-ios7.0" define i64 @foo(ptr nocapture readonly %ptr, i64 %a, i64 %b, i64 %c) local_unnamed_addr #0 { -; CHECK-LABEL: foo: -; CHECK: ; %bb.0: ; %entry -; CHECK-NEXT: lsr x8, x1, #32 -; CHECK-NEXT: ldr w9, [x0, #4] -; CHECK-NEXT: cmn x3, x2 -; CHECK-NEXT: umull x8, w9, w8 -; CHECK-NEXT: cinc x0, x8, hs -; CHECK-NEXT: ret +; CHECK-SD-LABEL: foo: +; CHECK-SD: ; %bb.0: ; %entry +; CHECK-SD-NEXT: lsr x8, x1, #32 +; CHECK-SD-NEXT: ldr w9, [x0, #4] +; CHECK-SD-NEXT: cmn x3, x2 +; CHECK-SD-NEXT: umull x8, w9, w8 +; CHECK-SD-NEXT: cinc x0, x8, hs +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: foo: +; CHECK-GI: ; %bb.0: ; %entry +; CHECK-GI-NEXT: ldr x8, [x0] +; CHECK-GI-NEXT: lsr x9, x1, #32 +; CHECK-GI-NEXT: cmn x3, x2 +; CHECK-GI-NEXT: cset w10, hs +; CHECK-GI-NEXT: lsr x8, x8, #32 +; CHECK-GI-NEXT: and x10, x10, #0x1 +; CHECK-GI-NEXT: umaddl x0, w8, w9, x10 +; CHECK-GI-NEXT: ret entry: %0 = lshr i64 %a, 32 %1 = load i64, ptr %ptr, align 8 @@ -24,3 +37,6 @@ entry: } attributes #0 = { norecurse nounwind readonly } + +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/CodeGen/AArch64/addsub.ll b/llvm/test/CodeGen/AArch64/addsub.ll index 3a4955c..bb0d38a 100644 --- a/llvm/test/CodeGen/AArch64/addsub.ll +++ b/llvm/test/CodeGen/AArch64/addsub.ll @@ -1,50 +1,26 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-linux-gnu -verify-machineinstrs | FileCheck %s - -; Note that this should be refactored (for efficiency if nothing else) -; when the PCS is implemented so we don't have to worry about the -; loads and stores. 
- -@var_i32 = global i32 42 -@var2_i32 = global i32 43 -@var_i64 = global i64 0 +; RUN: llc -mtriple=aarch64-none-elf < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple=aarch64-none-elf -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI ; Add pure 12-bit immediates: -define void @add_small() { -; CHECK-LABEL: add_small: -; CHECK: // %bb.0: -; CHECK-NEXT: adrp x8, :got:var_i32 -; CHECK-NEXT: adrp x9, :got:var_i64 -; CHECK-NEXT: ldr x8, [x8, :got_lo12:var_i32] -; CHECK-NEXT: ldr x9, [x9, :got_lo12:var_i64] -; CHECK-NEXT: ldr w10, [x8] -; CHECK-NEXT: ldr x11, [x9] -; CHECK-NEXT: add w10, w10, #4095 -; CHECK-NEXT: add x11, x11, #52 -; CHECK-NEXT: str w10, [x8] -; CHECK-NEXT: str x11, [x9] -; CHECK-NEXT: ret - - %val32 = load i32, ptr @var_i32 +define i32 @add_small_i32(i32 %val32) { +; CHECK-LABEL: add_small_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: add w0, w0, #4095 +; CHECK-NEXT: ret %newval32 = add i32 %val32, 4095 - store i32 %newval32, ptr @var_i32 + ret i32 %newval32 +} - %val64 = load i64, ptr @var_i64 +define i64 @add_small_i64(i64 %val64) { +; CHECK-LABEL: add_small_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: add x0, x0, #52 +; CHECK-NEXT: ret %newval64 = add i64 %val64, 52 - store i64 %newval64, ptr @var_i64 - - ret void + ret i64 %newval64 } -; Make sure we grab the imm variant when the register operand -; can be implicitly zero-extend. -; We used to generate something horrible like this: -; wA = ldrb -; xB = ldimm 12 -; xC = add xB, wA, uxtb -; whereas this can be achieved with: -; wA = ldrb -; xC = add xA, #12 ; <- xA implicitly zero extend wA. define void @add_small_imm(ptr %p, ptr %q, i32 %b, ptr %addr) { ; CHECK-LABEL: add_small_imm: ; CHECK: // %bb.0: // %entry @@ -55,98 +31,71 @@ define void @add_small_imm(ptr %p, ptr %q, i32 %b, ptr %addr) { ; CHECK-NEXT: str x8, [x1] ; CHECK-NEXT: ret entry: - %t = load i8, ptr %p %promoted = zext i8 %t to i64 %zextt = zext i8 %t to i32 %add = add nuw i32 %zextt, %b - %add2 = add nuw i64 %promoted, 12 store i32 %add, ptr %addr - store i64 %add2, ptr %q ret void } ; Add 12-bit immediates, shifted left by 12 bits -define void @add_med() { -; CHECK-LABEL: add_med: -; CHECK: // %bb.0: -; CHECK-NEXT: adrp x8, :got:var_i32 -; CHECK-NEXT: adrp x9, :got:var_i64 -; CHECK-NEXT: ldr x8, [x8, :got_lo12:var_i32] -; CHECK-NEXT: ldr x9, [x9, :got_lo12:var_i64] -; CHECK-NEXT: ldr w10, [x8] -; CHECK-NEXT: ldr x11, [x9] -; CHECK-NEXT: add w10, w10, #3567, lsl #12 // =14610432 -; CHECK-NEXT: add x11, x11, #4095, lsl #12 // =16773120 -; CHECK-NEXT: str w10, [x8] -; CHECK-NEXT: str x11, [x9] -; CHECK-NEXT: ret - - %val32 = load i32, ptr @var_i32 +define i32 @add_med_i32(i32 %val32) { +; CHECK-LABEL: add_med_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: add w0, w0, #3567, lsl #12 // =14610432 +; CHECK-NEXT: ret %newval32 = add i32 %val32, 14610432 ; =0xdef000 - store i32 %newval32, ptr @var_i32 + ret i32 %newval32 +} - %val64 = load i64, ptr @var_i64 +define i64 @add_med_i64(i64 %val64) { +; CHECK-LABEL: add_med_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: add x0, x0, #4095, lsl #12 // =16773120 +; CHECK-NEXT: ret %newval64 = add i64 %val64, 16773120 ; =0xfff000 - store i64 %newval64, ptr @var_i64 - - ret void + ret i64 %newval64 } ; Subtract 12-bit immediates -define void @sub_small() { -; CHECK-LABEL: sub_small: -; CHECK: // %bb.0: -; CHECK-NEXT: adrp x8, :got:var_i32 -; CHECK-NEXT: adrp x9, :got:var_i64 -; CHECK-NEXT: ldr x8, [x8, :got_lo12:var_i32] -; CHECK-NEXT: ldr x9, [x9, :got_lo12:var_i64] -; CHECK-NEXT: ldr w10, [x8] -; CHECK-NEXT: 
ldr x11, [x9] -; CHECK-NEXT: sub w10, w10, #4095 -; CHECK-NEXT: sub x11, x11, #52 -; CHECK-NEXT: str w10, [x8] -; CHECK-NEXT: str x11, [x9] -; CHECK-NEXT: ret - - %val32 = load i32, ptr @var_i32 +define i32 @sub_small_i32(i32 %val32) { +; CHECK-LABEL: sub_small_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub w0, w0, #4095 +; CHECK-NEXT: ret %newval32 = sub i32 %val32, 4095 - store i32 %newval32, ptr @var_i32 + ret i32 %newval32 +} - %val64 = load i64, ptr @var_i64 +define i64 @sub_small_i64(i64 %val64) { +; CHECK-LABEL: sub_small_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: sub x0, x0, #52 +; CHECK-NEXT: ret %newval64 = sub i64 %val64, 52 - store i64 %newval64, ptr @var_i64 - - ret void + ret i64 %newval64 } ; Subtract 12-bit immediates, shifted left by 12 bits -define void @sub_med() { -; CHECK-LABEL: sub_med: -; CHECK: // %bb.0: -; CHECK-NEXT: adrp x8, :got:var_i32 -; CHECK-NEXT: adrp x9, :got:var_i64 -; CHECK-NEXT: ldr x8, [x8, :got_lo12:var_i32] -; CHECK-NEXT: ldr x9, [x9, :got_lo12:var_i64] -; CHECK-NEXT: ldr w10, [x8] -; CHECK-NEXT: ldr x11, [x9] -; CHECK-NEXT: sub w10, w10, #3567, lsl #12 // =14610432 -; CHECK-NEXT: sub x11, x11, #4095, lsl #12 // =16773120 -; CHECK-NEXT: str w10, [x8] -; CHECK-NEXT: str x11, [x9] -; CHECK-NEXT: ret - - %val32 = load i32, ptr @var_i32 +define i32 @sub_med_i32(i32 %val32) { +; CHECK-LABEL: sub_med_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub w0, w0, #3567, lsl #12 // =14610432 +; CHECK-NEXT: ret %newval32 = sub i32 %val32, 14610432 ; =0xdef000 - store i32 %newval32, ptr @var_i32 + ret i32 %newval32 +} - %val64 = load i64, ptr @var_i64 +define i64 @sub_med_i64(i64 %val64) { +; CHECK-LABEL: sub_med_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: sub x0, x0, #4095, lsl #12 // =16773120 +; CHECK-NEXT: ret %newval64 = sub i64 %val64, 16773120 ; =0xfff000 - store i64 %newval64, ptr @var_i64 - - ret void + ret i64 %newval64 } define i64 @add_two_parts_imm_i64(i64 %a) { @@ -261,10 +210,10 @@ define void @add_in_loop(i32 %0) { ; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: mov w19, #43690 // =0xaaaa ; CHECK-NEXT: movk w19, #170, lsl #16 -; CHECK-NEXT: .LBB15_1: // =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: .LBB19_1: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: add w0, w0, w19 ; CHECK-NEXT: bl foox -; CHECK-NEXT: b .LBB15_1 +; CHECK-NEXT: b .LBB19_1 br label %2 2: %3 = phi i32 [ %0, %1 ], [ %5, %2 ] @@ -273,75 +222,103 @@ define void @add_in_loop(i32 %0) { br label %2 } -define void @testing() { -; CHECK-LABEL: testing: -; CHECK: // %bb.0: -; CHECK-NEXT: adrp x8, :got:var_i32 -; CHECK-NEXT: ldr x8, [x8, :got_lo12:var_i32] -; CHECK-NEXT: ldr w9, [x8] -; CHECK-NEXT: cmp w9, #4095 -; CHECK-NEXT: b.ne .LBB16_6 -; CHECK-NEXT: // %bb.1: // %test2 -; CHECK-NEXT: adrp x10, :got:var2_i32 -; CHECK-NEXT: add w11, w9, #1 -; CHECK-NEXT: ldr x10, [x10, :got_lo12:var2_i32] -; CHECK-NEXT: str w11, [x8] -; CHECK-NEXT: ldr w10, [x10] -; CHECK-NEXT: cmp w10, #3567, lsl #12 // =14610432 -; CHECK-NEXT: b.lo .LBB16_6 -; CHECK-NEXT: // %bb.2: // %test3 -; CHECK-NEXT: add w11, w9, #2 -; CHECK-NEXT: cmp w9, #123 -; CHECK-NEXT: str w11, [x8] -; CHECK-NEXT: b.lt .LBB16_6 -; CHECK-NEXT: // %bb.3: // %test4 -; CHECK-NEXT: add w11, w9, #3 -; CHECK-NEXT: cmp w10, #321 -; CHECK-NEXT: str w11, [x8] -; CHECK-NEXT: b.gt .LBB16_6 -; CHECK-NEXT: // %bb.4: // %test5 -; CHECK-NEXT: add w11, w9, #4 -; CHECK-NEXT: cmn w10, #443 -; CHECK-NEXT: str w11, [x8] -; CHECK-NEXT: b.ge .LBB16_6 -; CHECK-NEXT: // %bb.5: // %test6 -; CHECK-NEXT: add w9, w9, #5 -; CHECK-NEXT: str w9, [x8] -; CHECK-NEXT: .LBB16_6: // 
%common.ret -; CHECK-NEXT: ret - %val = load i32, ptr @var_i32 - %val2 = load i32, ptr @var2_i32 +define void @testing(ptr %var_i32, ptr %var2_i32) { +; CHECK-SD-LABEL: testing: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ldr w8, [x0] +; CHECK-SD-NEXT: cmp w8, #4095 +; CHECK-SD-NEXT: b.ne .LBB20_6 +; CHECK-SD-NEXT: // %bb.1: // %test2 +; CHECK-SD-NEXT: ldr w9, [x1] +; CHECK-SD-NEXT: add w10, w8, #1 +; CHECK-SD-NEXT: str w10, [x0] +; CHECK-SD-NEXT: cmp w9, #3567, lsl #12 // =14610432 +; CHECK-SD-NEXT: b.lo .LBB20_6 +; CHECK-SD-NEXT: // %bb.2: // %test3 +; CHECK-SD-NEXT: add w10, w8, #2 +; CHECK-SD-NEXT: cmp w8, #123 +; CHECK-SD-NEXT: str w10, [x0] +; CHECK-SD-NEXT: b.lt .LBB20_6 +; CHECK-SD-NEXT: // %bb.3: // %test4 +; CHECK-SD-NEXT: add w10, w8, #3 +; CHECK-SD-NEXT: cmp w9, #321 +; CHECK-SD-NEXT: str w10, [x0] +; CHECK-SD-NEXT: b.gt .LBB20_6 +; CHECK-SD-NEXT: // %bb.4: // %test5 +; CHECK-SD-NEXT: add w10, w8, #4 +; CHECK-SD-NEXT: cmn w9, #443 +; CHECK-SD-NEXT: str w10, [x0] +; CHECK-SD-NEXT: b.ge .LBB20_6 +; CHECK-SD-NEXT: // %bb.5: // %test6 +; CHECK-SD-NEXT: add w8, w8, #5 +; CHECK-SD-NEXT: str w8, [x0] +; CHECK-SD-NEXT: .LBB20_6: // %common.ret +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: testing: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ldr w8, [x0] +; CHECK-GI-NEXT: cmp w8, #4095 +; CHECK-GI-NEXT: b.ne .LBB20_6 +; CHECK-GI-NEXT: // %bb.1: // %test2 +; CHECK-GI-NEXT: ldr w9, [x1] +; CHECK-GI-NEXT: add w10, w8, #1 +; CHECK-GI-NEXT: str w10, [x0] +; CHECK-GI-NEXT: cmp w9, #3567, lsl #12 // =14610432 +; CHECK-GI-NEXT: b.lo .LBB20_6 +; CHECK-GI-NEXT: // %bb.2: // %test3 +; CHECK-GI-NEXT: add w10, w8, #2 +; CHECK-GI-NEXT: cmp w8, #123 +; CHECK-GI-NEXT: str w10, [x0] +; CHECK-GI-NEXT: b.lt .LBB20_6 +; CHECK-GI-NEXT: // %bb.3: // %test4 +; CHECK-GI-NEXT: add w10, w8, #3 +; CHECK-GI-NEXT: cmp w9, #321 +; CHECK-GI-NEXT: str w10, [x0] +; CHECK-GI-NEXT: b.gt .LBB20_6 +; CHECK-GI-NEXT: // %bb.4: // %test5 +; CHECK-GI-NEXT: add w10, w8, #4 +; CHECK-GI-NEXT: cmn w9, #444 +; CHECK-GI-NEXT: str w10, [x0] +; CHECK-GI-NEXT: b.gt .LBB20_6 +; CHECK-GI-NEXT: // %bb.5: // %test6 +; CHECK-GI-NEXT: add w8, w8, #5 +; CHECK-GI-NEXT: str w8, [x0] +; CHECK-GI-NEXT: .LBB20_6: // %common.ret +; CHECK-GI-NEXT: ret + %val = load i32, ptr %var_i32 + %val2 = load i32, ptr %var2_i32 %cmp_pos_small = icmp ne i32 %val, 4095 br i1 %cmp_pos_small, label %ret, label %test2 test2: %newval2 = add i32 %val, 1 - store i32 %newval2, ptr @var_i32 + store i32 %newval2, ptr %var_i32 %cmp_pos_big = icmp ult i32 %val2, 14610432 br i1 %cmp_pos_big, label %ret, label %test3 test3: %newval3 = add i32 %val, 2 - store i32 %newval3, ptr @var_i32 + store i32 %newval3, ptr %var_i32 %cmp_pos_slt = icmp slt i32 %val, 123 br i1 %cmp_pos_slt, label %ret, label %test4 test4: %newval4 = add i32 %val, 3 - store i32 %newval4, ptr @var_i32 + store i32 %newval4, ptr %var_i32 %cmp_pos_sgt = icmp sgt i32 %val2, 321 br i1 %cmp_pos_sgt, label %ret, label %test5 test5: %newval5 = add i32 %val, 4 - store i32 %newval5, ptr @var_i32 + store i32 %newval5, ptr %var_i32 %cmp_neg_uge = icmp sgt i32 %val2, -444 br i1 %cmp_neg_uge, label %ret, label %test6 test6: %newval6 = add i32 %val, 5 - store i32 %newval6, ptr @var_i32 + store i32 %newval6, ptr %var_i32 ret void ret: @@ -371,15 +348,26 @@ define i1 @sadd_add(i32 %a, i32 %b, ptr %p) { declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b) define i1 @uadd_add(i8 %a, i8 %b, ptr %p) { -; CHECK-LABEL: uadd_add: -; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #255 // =0xff -; CHECK-NEXT: bic w8, w8, w0 -; CHECK-NEXT: add 
w8, w8, w1, uxtb -; CHECK-NEXT: lsr w0, w8, #8 -; CHECK-NEXT: add w8, w8, #1 -; CHECK-NEXT: strb w8, [x2] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: uadd_add: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov w8, #255 // =0xff +; CHECK-SD-NEXT: bic w8, w8, w0 +; CHECK-SD-NEXT: add w8, w8, w1, uxtb +; CHECK-SD-NEXT: lsr w0, w8, #8 +; CHECK-SD-NEXT: add w8, w8, #1 +; CHECK-SD-NEXT: strb w8, [x2] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: uadd_add: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mvn w8, w0 +; CHECK-GI-NEXT: and w9, w1, #0xff +; CHECK-GI-NEXT: add w8, w9, w8, uxtb +; CHECK-GI-NEXT: cmp w8, w8, uxtb +; CHECK-GI-NEXT: add w8, w8, #1 +; CHECK-GI-NEXT: cset w0, ne +; CHECK-GI-NEXT: strb w8, [x2] +; CHECK-GI-NEXT: ret %nota = xor i8 %a, -1 %a0 = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %nota, i8 %b) %e0 = extractvalue {i8, i1} %a0, 0 @@ -521,29 +509,48 @@ define i1 @reject_non_eqne_csinc(i32 %0) { } define i32 @accept_csel(i32 %0) { -; CHECK-LABEL: accept_csel: -; CHECK: // %bb.0: -; CHECK-NEXT: sub w9, w0, #273, lsl #12 // =1118208 -; CHECK-NEXT: mov w8, #17 // =0x11 -; CHECK-NEXT: cmp w9, #273 -; CHECK-NEXT: mov w9, #11 // =0xb -; CHECK-NEXT: csel w0, w9, w8, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: accept_csel: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub w9, w0, #273, lsl #12 // =1118208 +; CHECK-SD-NEXT: mov w8, #17 // =0x11 +; CHECK-SD-NEXT: cmp w9, #273 +; CHECK-SD-NEXT: mov w9, #11 // =0xb +; CHECK-SD-NEXT: csel w0, w9, w8, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: accept_csel: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub w8, w0, #273, lsl #12 // =1118208 +; CHECK-GI-NEXT: mov w9, #17 // =0x11 +; CHECK-GI-NEXT: mov w10, #11 // =0xb +; CHECK-GI-NEXT: cmp w8, #273 +; CHECK-GI-NEXT: csel w0, w10, w9, eq +; CHECK-GI-NEXT: ret %2 = icmp eq i32 %0, 1118481 %3 = select i1 %2, i32 11, i32 17 ret i32 %3 } define i32 @reject_non_eqne_csel(i32 %0) { -; CHECK-LABEL: reject_non_eqne_csel: -; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #4369 // =0x1111 -; CHECK-NEXT: mov w9, #11 // =0xb -; CHECK-NEXT: movk w8, #17, lsl #16 -; CHECK-NEXT: cmp w0, w8 -; CHECK-NEXT: mov w8, #17 // =0x11 -; CHECK-NEXT: csel w0, w9, w8, lo -; CHECK-NEXT: ret +; CHECK-SD-LABEL: reject_non_eqne_csel: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov w8, #4369 // =0x1111 +; CHECK-SD-NEXT: mov w9, #11 // =0xb +; CHECK-SD-NEXT: movk w8, #17, lsl #16 +; CHECK-SD-NEXT: cmp w0, w8 +; CHECK-SD-NEXT: mov w8, #17 // =0x11 +; CHECK-SD-NEXT: csel w0, w9, w8, lo +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: reject_non_eqne_csel: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #4369 // =0x1111 +; CHECK-GI-NEXT: mov w9, #17 // =0x11 +; CHECK-GI-NEXT: mov w10, #11 // =0xb +; CHECK-GI-NEXT: movk w8, #17, lsl #16 +; CHECK-GI-NEXT: cmp w0, w8 +; CHECK-GI-NEXT: csel w0, w10, w9, lo +; CHECK-GI-NEXT: ret %2 = icmp ult i32 %0, 1118481 %3 = select i1 %2, i32 11, i32 17 ret i32 %3 @@ -556,10 +563,10 @@ define void @accept_branch(i32 %0) { ; CHECK: // %bb.0: ; CHECK-NEXT: sub w8, w0, #291, lsl #12 // =1191936 ; CHECK-NEXT: cmp w8, #1110 -; CHECK-NEXT: b.eq .LBB32_2 +; CHECK-NEXT: b.eq .LBB36_2 ; CHECK-NEXT: // %bb.1: ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB32_2: +; CHECK-NEXT: .LBB36_2: ; CHECK-NEXT: b fooy %2 = icmp ne i32 %0, 1193046 br i1 %2, label %4, label %3 @@ -576,10 +583,10 @@ define void @reject_non_eqne_branch(i32 %0) { ; CHECK-NEXT: mov w8, #13398 // =0x3456 ; CHECK-NEXT: movk w8, #18, lsl #16 ; CHECK-NEXT: cmp w0, w8 -; CHECK-NEXT: b.le .LBB33_2 +; CHECK-NEXT: b.le .LBB37_2 ; CHECK-NEXT: // %bb.1: ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB33_2: 
+; CHECK-NEXT: .LBB37_2: ; CHECK-NEXT: b fooy %2 = icmp sgt i32 %0, 1193046 br i1 %2, label %4, label %3 @@ -591,25 +598,45 @@ define void @reject_non_eqne_branch(i32 %0) { } define i32 @reject_multiple_usages(i32 %0) { -; CHECK-LABEL: reject_multiple_usages: -; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #4369 // =0x1111 -; CHECK-NEXT: mov w9, #3 // =0x3 -; CHECK-NEXT: mov w10, #17 // =0x11 -; CHECK-NEXT: movk w8, #17, lsl #16 -; CHECK-NEXT: mov w11, #12 // =0xc -; CHECK-NEXT: cmp w0, w8 -; CHECK-NEXT: mov w8, #9 // =0x9 -; CHECK-NEXT: csel w8, w8, w9, eq -; CHECK-NEXT: csel w9, w11, w10, hi -; CHECK-NEXT: mov w10, #53312 // =0xd040 -; CHECK-NEXT: movk w10, #2, lsl #16 -; CHECK-NEXT: add w8, w8, w9 -; CHECK-NEXT: mov w9, #26304 // =0x66c0 -; CHECK-NEXT: cmp w0, w10 -; CHECK-NEXT: movk w9, #1433, lsl #16 -; CHECK-NEXT: csel w0, w8, w9, hi -; CHECK-NEXT: ret +; CHECK-SD-LABEL: reject_multiple_usages: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov w8, #4369 // =0x1111 +; CHECK-SD-NEXT: mov w9, #3 // =0x3 +; CHECK-SD-NEXT: mov w10, #17 // =0x11 +; CHECK-SD-NEXT: movk w8, #17, lsl #16 +; CHECK-SD-NEXT: mov w11, #12 // =0xc +; CHECK-SD-NEXT: cmp w0, w8 +; CHECK-SD-NEXT: mov w8, #9 // =0x9 +; CHECK-SD-NEXT: csel w8, w8, w9, eq +; CHECK-SD-NEXT: csel w9, w11, w10, hi +; CHECK-SD-NEXT: mov w10, #53312 // =0xd040 +; CHECK-SD-NEXT: movk w10, #2, lsl #16 +; CHECK-SD-NEXT: add w8, w8, w9 +; CHECK-SD-NEXT: mov w9, #26304 // =0x66c0 +; CHECK-SD-NEXT: cmp w0, w10 +; CHECK-SD-NEXT: movk w9, #1433, lsl #16 +; CHECK-SD-NEXT: csel w0, w8, w9, hi +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: reject_multiple_usages: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #4369 // =0x1111 +; CHECK-GI-NEXT: mov w9, #3 // =0x3 +; CHECK-GI-NEXT: mov w10, #9 // =0x9 +; CHECK-GI-NEXT: movk w8, #17, lsl #16 +; CHECK-GI-NEXT: mov w11, #12 // =0xc +; CHECK-GI-NEXT: cmp w0, w8 +; CHECK-GI-NEXT: mov w8, #17 // =0x11 +; CHECK-GI-NEXT: csel w9, w10, w9, eq +; CHECK-GI-NEXT: csel w8, w11, w8, hi +; CHECK-GI-NEXT: mov w10, #53312 // =0xd040 +; CHECK-GI-NEXT: movk w10, #2, lsl #16 +; CHECK-GI-NEXT: add w8, w9, w8 +; CHECK-GI-NEXT: mov w9, #26304 // =0x66c0 +; CHECK-GI-NEXT: movk w9, #1433, lsl #16 +; CHECK-GI-NEXT: cmp w0, w10 +; CHECK-GI-NEXT: csel w0, w8, w9, hi +; CHECK-GI-NEXT: ret %2 = icmp eq i32 %0, 1118481 %3 = icmp ugt i32 %0, 1118481 %4 = select i1 %2, i32 9, i32 3 @@ -629,12 +656,12 @@ define dso_local i32 @neigh_periodic_work_tbl_1() { ; CHECK-NEXT: add x8, x8, :lo12:neigh_periodic_work_tbl_1 ; CHECK-NEXT: add x8, x8, #18, lsl #12 // =73728 ; CHECK-NEXT: cmn x8, #1272 -; CHECK-NEXT: b.mi .LBB35_2 +; CHECK-NEXT: b.mi .LBB39_2 ; CHECK-NEXT: // %bb.1: // %if.end ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB35_2: // %for.cond +; CHECK-NEXT: .LBB39_2: // %for.cond ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: b .LBB35_2 +; CHECK-NEXT: b .LBB39_2 entry: %cmp = icmp slt i64 add (i64 ptrtoint (ptr @neigh_periodic_work_tbl_1 to i64), i64 75000), 0 br i1 %cmp, label %for.cond, label %if.end @@ -654,15 +681,15 @@ define dso_local i32 @_extract_crng_crng() { ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: adrp x8, _extract_crng_crng ; CHECK-NEXT: add x8, x8, :lo12:_extract_crng_crng -; CHECK-NEXT: tbnz x8, #63, .LBB36_2 +; CHECK-NEXT: tbnz x8, #63, .LBB40_2 ; CHECK-NEXT: // %bb.1: // %lor.lhs.false ; CHECK-NEXT: adrp x9, jiffies ; CHECK-NEXT: ldrsw x9, [x9, :lo12:jiffies] ; CHECK-NEXT: sub x8, x8, x9 ; CHECK-NEXT: add x8, x8, #18, lsl #12 // =73728 ; CHECK-NEXT: cmn x8, #1272 -; CHECK-NEXT: b.pl .LBB36_3 -; CHECK-NEXT: .LBB36_2: // 
%if.then +; CHECK-NEXT: b.pl .LBB40_3 +; CHECK-NEXT: .LBB40_2: // %if.then ; CHECK-NEXT: adrp x8, primary_crng ; CHECK-NEXT: ldr w8, [x8, :lo12:primary_crng] ; CHECK-NEXT: cmp w8, #0 @@ -670,7 +697,7 @@ define dso_local i32 @_extract_crng_crng() { ; CHECK-NEXT: add x8, x8, :lo12:input_pool ; CHECK-NEXT: csel x0, xzr, x8, eq ; CHECK-NEXT: b crng_reseed -; CHECK-NEXT: .LBB36_3: // %if.end +; CHECK-NEXT: .LBB40_3: // %if.end ; CHECK-NEXT: ret entry: %cmp2 = icmp slt ptr @_extract_crng_crng, null @@ -694,11 +721,18 @@ if.end: ; preds = %if.then, %lor.lhs.f ; ((X << C) - Y) + Z --> (Z - Y) + (X << C) define i32 @commute_subop0(i32 %x, i32 %y, i32 %z) { -; CHECK-LABEL: commute_subop0: -; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w2, w1 -; CHECK-NEXT: add w0, w8, w0, lsl #3 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: commute_subop0: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub w8, w2, w1 +; CHECK-SD-NEXT: add w0, w8, w0, lsl #3 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: commute_subop0: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: lsl w8, w0, #3 +; CHECK-GI-NEXT: sub w8, w8, w1 +; CHECK-GI-NEXT: add w0, w8, w2 +; CHECK-GI-NEXT: ret %shl = shl i32 %x, 3 %sub = sub i32 %shl, %y %add = add i32 %sub, %z @@ -707,11 +741,18 @@ define i32 @commute_subop0(i32 %x, i32 %y, i32 %z) { ; ((X >> C) - Y) + Z --> (Z - Y) + (X >> C) define i32 @commute_subop0_lshr(i32 %x, i32 %y, i32 %z) { -; CHECK-LABEL: commute_subop0_lshr: -; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w2, w1 -; CHECK-NEXT: add w0, w8, w0, lsr #3 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: commute_subop0_lshr: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub w8, w2, w1 +; CHECK-SD-NEXT: add w0, w8, w0, lsr #3 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: commute_subop0_lshr: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: lsr w8, w0, #3 +; CHECK-GI-NEXT: sub w8, w8, w1 +; CHECK-GI-NEXT: add w0, w8, w2 +; CHECK-GI-NEXT: ret %lshr = lshr i32 %x, 3 %sub = sub i32 %lshr, %y %add = add i32 %sub, %z @@ -720,11 +761,18 @@ define i32 @commute_subop0_lshr(i32 %x, i32 %y, i32 %z) { ; ((X >> C) - Y) + Z --> (Z - Y) + (X >> C) define i32 @commute_subop0_ashr(i32 %x, i32 %y, i32 %z) { -; CHECK-LABEL: commute_subop0_ashr: -; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w2, w1 -; CHECK-NEXT: add w0, w8, w0, asr #3 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: commute_subop0_ashr: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub w8, w2, w1 +; CHECK-SD-NEXT: add w0, w8, w0, asr #3 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: commute_subop0_ashr: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: asr w8, w0, #3 +; CHECK-GI-NEXT: sub w8, w8, w1 +; CHECK-GI-NEXT: add w0, w8, w2 +; CHECK-GI-NEXT: ret %ashr = ashr i32 %x, 3 %sub = sub i32 %ashr, %y %add = add i32 %sub, %z @@ -733,11 +781,19 @@ define i32 @commute_subop0_ashr(i32 %x, i32 %y, i32 %z) { ; ((sext X) - Y) + Z --> (Z - Y) + (sext X) define i64 @commute_subop0_sext(i32 %x, i64 %y, i64 %z) { -; CHECK-LABEL: commute_subop0_sext: -; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x2, x1 -; CHECK-NEXT: add x0, x8, w0, sxtw -; CHECK-NEXT: ret +; CHECK-SD-LABEL: commute_subop0_sext: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub x8, x2, x1 +; CHECK-SD-NEXT: add x0, x8, w0, sxtw +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: commute_subop0_sext: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0 +; CHECK-GI-NEXT: sxtw x8, w0 +; CHECK-GI-NEXT: sub x8, x8, x1 +; CHECK-GI-NEXT: add x0, x8, x2 +; CHECK-GI-NEXT: ret %sext = sext i32 %x to i64 %sub = sub i64 %sext, %y %add = add i64 %sub, %z @@ -746,11 +802,18 @@ define i64 @commute_subop0_sext(i32 %x, i64 %y, i64 
%z) { ; ((sext_inreg X) - Y) + Z --> (Z - Y) + (sext_inreg X) define i64 @commute_subop0_sext_inreg(i64 %x, i64 %y, i64 %z) { -; CHECK-LABEL: commute_subop0_sext_inreg: -; CHECK: // %bb.0: -; CHECK-NEXT: sub x8, x2, x1 -; CHECK-NEXT: add x0, x8, w0, sxth -; CHECK-NEXT: ret +; CHECK-SD-LABEL: commute_subop0_sext_inreg: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub x8, x2, x1 +; CHECK-SD-NEXT: add x0, x8, w0, sxth +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: commute_subop0_sext_inreg: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sxth x8, w0 +; CHECK-GI-NEXT: sub x8, x8, x1 +; CHECK-GI-NEXT: add x0, x8, x2 +; CHECK-GI-NEXT: ret %shl = shl i64 %x, 48 %ashr = ashr i64 %shl, 48 %sub = sub i64 %ashr, %y @@ -760,11 +823,18 @@ define i64 @commute_subop0_sext_inreg(i64 %x, i64 %y, i64 %z) { ; ((zext X) - Y) + Z --> (Z - Y) + (zext X) define i32 @commute_subop0_zext(i16 %x, i32 %y, i32 %z) { -; CHECK-LABEL: commute_subop0_zext: -; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w2, w1 -; CHECK-NEXT: add w0, w8, w0, uxth -; CHECK-NEXT: ret +; CHECK-SD-LABEL: commute_subop0_zext: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub w8, w2, w1 +; CHECK-SD-NEXT: add w0, w8, w0, uxth +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: commute_subop0_zext: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: and w8, w0, #0xffff +; CHECK-GI-NEXT: sub w8, w8, w1 +; CHECK-GI-NEXT: add w0, w8, w2 +; CHECK-GI-NEXT: ret %zext = zext i16 %x to i32 %sub = sub i32 %zext, %y %add = add i32 %sub, %z @@ -774,14 +844,25 @@ define i32 @commute_subop0_zext(i16 %x, i32 %y, i32 %z) { ; ((anyext X) - Y) + Z --> (Z - Y) + (anyext X) define i8 @commute_subop0_anyext(i16 %a, i16 %b, i32 %c) { -; CHECK-LABEL: commute_subop0_anyext: -; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #111 // =0x6f -; CHECK-NEXT: sub w9, w2, w1 -; CHECK-NEXT: madd w8, w0, w8, w9 -; CHECK-NEXT: lsl w8, w8, #3 -; CHECK-NEXT: sub w0, w8, #1776 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: commute_subop0_anyext: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov w8, #111 // =0x6f +; CHECK-SD-NEXT: sub w9, w2, w1 +; CHECK-SD-NEXT: madd w8, w0, w8, w9 +; CHECK-SD-NEXT: lsl w8, w8, #3 +; CHECK-SD-NEXT: sub w0, w8, #1776 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: commute_subop0_anyext: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #111 // =0x6f +; CHECK-GI-NEXT: add w9, w1, #222 +; CHECK-GI-NEXT: mul w8, w0, w8 +; CHECK-GI-NEXT: and w8, w8, #0xffff +; CHECK-GI-NEXT: sub w8, w8, w9, uxth +; CHECK-GI-NEXT: add w8, w8, w2 +; CHECK-GI-NEXT: lsl w0, w8, #3 +; CHECK-GI-NEXT: ret %aa = mul i16 %a, 111 %bb = add i16 %b, 222 %a_32 = zext i16 %aa to i32 @@ -795,11 +876,18 @@ define i8 @commute_subop0_anyext(i16 %a, i16 %b, i32 %c) { ; ((X and C) - Y) + Z --> (Z - Y) + (X and C) define i32 @commute_subop0_and(i32 %x, i32 %y, i32 %z) { -; CHECK-LABEL: commute_subop0_and: -; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w2, w1 -; CHECK-NEXT: add w0, w8, w0, uxtb -; CHECK-NEXT: ret +; CHECK-SD-LABEL: commute_subop0_and: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub w8, w2, w1 +; CHECK-SD-NEXT: add w0, w8, w0, uxtb +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: commute_subop0_and: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: and w8, w0, #0xff +; CHECK-GI-NEXT: sub w8, w8, w1 +; CHECK-GI-NEXT: add w0, w8, w2 +; CHECK-GI-NEXT: ret %and = and i32 %x, 255 %sub = sub i32 %and, %y %add = add i32 %sub, %z @@ -808,11 +896,18 @@ define i32 @commute_subop0_and(i32 %x, i32 %y, i32 %z) { ; Z + ((X << C) - Y) --> (Z - Y) + (X << C) define i32 @commute_subop0_cadd(i32 %x, i32 %y, i32 %z) { -; CHECK-LABEL: commute_subop0_cadd: -; CHECK: // %bb.0: -; 
CHECK-NEXT: sub w8, w2, w1 -; CHECK-NEXT: add w0, w8, w0, lsl #3 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: commute_subop0_cadd: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub w8, w2, w1 +; CHECK-SD-NEXT: add w0, w8, w0, lsl #3 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: commute_subop0_cadd: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: lsl w8, w0, #3 +; CHECK-GI-NEXT: sub w8, w8, w1 +; CHECK-GI-NEXT: add w0, w2, w8 +; CHECK-GI-NEXT: ret %shl = shl i32 %x, 3 %sub = sub i32 %shl, %y %add = add i32 %z, %sub @@ -821,11 +916,18 @@ define i32 @commute_subop0_cadd(i32 %x, i32 %y, i32 %z) { ; Y + ((X << C) - X) --> (Y - X) + (X << C) define i32 @commute_subop0_mul(i32 %x, i32 %y) { -; CHECK-LABEL: commute_subop0_mul: -; CHECK: // %bb.0: -; CHECK-NEXT: sub w8, w1, w0 -; CHECK-NEXT: add w0, w8, w0, lsl #3 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: commute_subop0_mul: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub w8, w1, w0 +; CHECK-SD-NEXT: add w0, w8, w0, lsl #3 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: commute_subop0_mul: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: lsl w8, w0, #3 +; CHECK-GI-NEXT: sub w8, w8, w0 +; CHECK-GI-NEXT: add w0, w8, w1 +; CHECK-GI-NEXT: ret %mul = mul i32 %x, 7 %add = add i32 %mul, %y ret i32 %add @@ -863,13 +965,22 @@ define i32 @commute_subop0_zshiftc_oneuse(i32 %x, i32 %y, i32 %z) { } define i32 @commute_subop0_zshiftc(i32 %x, i32 %y, i32 %z) { -; CHECK-LABEL: commute_subop0_zshiftc: -; CHECK: // %bb.0: -; CHECK-NEXT: lsl w8, w2, #2 -; CHECK-NEXT: sub w9, w8, w1 -; CHECK-NEXT: add w9, w9, w0, lsl #3 -; CHECK-NEXT: eor w0, w8, w9 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: commute_subop0_zshiftc: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsl w8, w2, #2 +; CHECK-SD-NEXT: sub w9, w8, w1 +; CHECK-SD-NEXT: add w9, w9, w0, lsl #3 +; CHECK-SD-NEXT: eor w0, w8, w9 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: commute_subop0_zshiftc: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: lsl w8, w0, #3 +; CHECK-GI-NEXT: lsl w9, w2, #2 +; CHECK-GI-NEXT: sub w8, w8, w1 +; CHECK-GI-NEXT: add w8, w8, w9 +; CHECK-GI-NEXT: eor w0, w9, w8 +; CHECK-GI-NEXT: ret %xshl = shl i32 %x, 3 %sub = sub i32 %xshl, %y %zshl = shl i32 %z, 2 diff --git a/llvm/test/CodeGen/AArch64/arm64-this-return.ll b/llvm/test/CodeGen/AArch64/arm64-this-return.ll index a497ba2..7dd47ac 100644 --- a/llvm/test/CodeGen/AArch64/arm64-this-return.ll +++ b/llvm/test/CodeGen/AArch64/arm64-this-return.ll @@ -148,7 +148,7 @@ define ptr @E_ctor_base(ptr %this, i32 %x) { ; GISEL-MIR: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp ; GISEL-MIR: [[COPY2:%[0-9]+]]:_(p0) = COPY [[COPY]](p0) ; GISEL-MIR: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 - ; GISEL-MIR: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw G_PTR_ADD [[COPY]], [[C]](s64) + ; GISEL-MIR: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[COPY]], [[C]](s64) ; GISEL-MIR: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp ; GISEL-MIR: $x0 = COPY [[PTR_ADD]](p0) ; GISEL-MIR: $w1 = COPY [[COPY1]](s32) diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll index b325851..78881c8 100644 --- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll +++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck -check-prefixes=CHECK,CHECK-SD %s +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s -check-prefixes=CHECK,CHECK-SD ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -global-isel | FileCheck 
%s --check-prefixes=CHECK,CHECK-GI define <8 x i16> @sabdl8h(ptr %A, ptr %B) nounwind { diff --git a/llvm/test/CodeGen/AArch64/arm64-vmul.ll b/llvm/test/CodeGen/AArch64/arm64-vmul.ll index 937a17c..07400bb 100644 --- a/llvm/test/CodeGen/AArch64/arm64-vmul.ll +++ b/llvm/test/CodeGen/AArch64/arm64-vmul.ll @@ -1,12 +1,50 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mattr=+aes | FileCheck %s +; RUN: llc -mtriple=aarch64-none-elf -mattr=+aes < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple=aarch64-none-elf -mattr=+aes -global-isel -global-isel-abort=2 2>&1 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI + +; CHECK-GI: warning: Instruction selection used fallback path for pmull8h +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmulh_1s +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_2s +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_4s +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_2d +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_commuted_neg_2s +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_commuted_neg_4s +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_commuted_neg_2d +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_indexed_2s +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_indexed_4s +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_indexed_2d +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_indexed_2s_strict +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_indexed_4s_strict +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_indexed_2d_strict +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmla_indexed_scalar_2s_strict +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmla_indexed_scalar_4s_strict +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmla_indexed_scalar_2d_strict +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmulh_lane_1s +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmlal_lane_1d +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmlsl_lane_1d +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for pmull_from_extract_dup_low +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for pmull_from_extract_dup_high +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for pmull_from_extract_duplane_low +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for pmull_from_extract_duplane_high +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for scalar_fmls_from_extract_v4f32 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for scalar_fmls_from_extract_v2f32 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for scalar_fmls_from_extract_v2f64 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_with_fneg_before_extract_v2f32 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_with_fneg_before_extract_v2f32_1 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for 
fmls_with_fneg_before_extract_v4f32 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_with_fneg_before_extract_v4f32_1 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_with_fneg_before_extract_v2f64 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmlal_d +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmlsl_d +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for test_pmull_64 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for test_pmull_high_64 define <8 x i16> @smull8h(ptr %A, ptr %B) nounwind { ; CHECK-LABEL: smull8h: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] -; CHECK-NEXT: smull.8h v0, v0, v1 +; CHECK-NEXT: smull v0.8h, v0.8b, v1.8b ; CHECK-NEXT: ret %tmp1 = load <8 x i8>, ptr %A %tmp2 = load <8 x i8>, ptr %B @@ -19,7 +57,7 @@ define <4 x i32> @smull4s(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] -; CHECK-NEXT: smull.4s v0, v0, v1 +; CHECK-NEXT: smull v0.4s, v0.4h, v1.4h ; CHECK-NEXT: ret %tmp1 = load <4 x i16>, ptr %A %tmp2 = load <4 x i16>, ptr %B @@ -32,7 +70,7 @@ define <2 x i64> @smull2d(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] -; CHECK-NEXT: smull.2d v0, v0, v1 +; CHECK-NEXT: smull v0.2d, v0.2s, v1.2s ; CHECK-NEXT: ret %tmp1 = load <2 x i32>, ptr %A %tmp2 = load <2 x i32>, ptr %B @@ -49,7 +87,7 @@ define <8 x i16> @umull8h(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] -; CHECK-NEXT: umull.8h v0, v0, v1 +; CHECK-NEXT: umull v0.8h, v0.8b, v1.8b ; CHECK-NEXT: ret %tmp1 = load <8 x i8>, ptr %A %tmp2 = load <8 x i8>, ptr %B @@ -62,7 +100,7 @@ define <4 x i32> @umull4s(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] -; CHECK-NEXT: umull.4s v0, v0, v1 +; CHECK-NEXT: umull v0.4s, v0.4h, v1.4h ; CHECK-NEXT: ret %tmp1 = load <4 x i16>, ptr %A %tmp2 = load <4 x i16>, ptr %B @@ -75,7 +113,7 @@ define <2 x i64> @umull2d(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] -; CHECK-NEXT: umull.2d v0, v0, v1 +; CHECK-NEXT: umull v0.2d, v0.2s, v1.2s ; CHECK-NEXT: ret %tmp1 = load <2 x i32>, ptr %A %tmp2 = load <2 x i32>, ptr %B @@ -92,7 +130,7 @@ define <4 x i32> @sqdmull4s(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] -; CHECK-NEXT: sqdmull.4s v0, v0, v1 +; CHECK-NEXT: sqdmull v0.4s, v0.4h, v1.4h ; CHECK-NEXT: ret %tmp1 = load <4 x i16>, ptr %A %tmp2 = load <4 x i16>, ptr %B @@ -105,7 +143,7 @@ define <2 x i64> @sqdmull2d(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] -; CHECK-NEXT: sqdmull.2d v0, v0, v1 +; CHECK-NEXT: sqdmull v0.2d, v0.2s, v1.2s ; CHECK-NEXT: ret %tmp1 = load <2 x i32>, ptr %A %tmp2 = load <2 x i32>, ptr %B @@ -114,12 +152,19 @@ define <2 x i64> @sqdmull2d(ptr %A, ptr %B) nounwind { } define <4 x i32> @sqdmull2_4s(ptr %A, ptr %B) nounwind { -; CHECK-LABEL: sqdmull2_4s: -; CHECK: // %bb.0: -; CHECK-NEXT: ldr d0, [x0, #8] -; CHECK-NEXT: ldr d1, [x1, #8] -; CHECK-NEXT: sqdmull.4s v0, v0, v1 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmull2_4s: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ldr d0, [x0, #8] +; CHECK-SD-NEXT: ldr d1, [x1, #8] +; CHECK-SD-NEXT: sqdmull v0.4s, v0.4h, v1.4h +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmull2_4s: +; 
CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ldr q0, [x0] +; CHECK-GI-NEXT: ldr q1, [x1] +; CHECK-GI-NEXT: sqdmull2 v0.4s, v0.8h, v1.8h +; CHECK-GI-NEXT: ret %load1 = load <8 x i16>, ptr %A %load2 = load <8 x i16>, ptr %B %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> @@ -129,12 +174,19 @@ define <4 x i32> @sqdmull2_4s(ptr %A, ptr %B) nounwind { } define <2 x i64> @sqdmull2_2d(ptr %A, ptr %B) nounwind { -; CHECK-LABEL: sqdmull2_2d: -; CHECK: // %bb.0: -; CHECK-NEXT: ldr d0, [x0, #8] -; CHECK-NEXT: ldr d1, [x1, #8] -; CHECK-NEXT: sqdmull.2d v0, v0, v1 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmull2_2d: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ldr d0, [x0, #8] +; CHECK-SD-NEXT: ldr d1, [x1, #8] +; CHECK-SD-NEXT: sqdmull v0.2d, v0.2s, v1.2s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmull2_2d: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ldr q0, [x0] +; CHECK-GI-NEXT: ldr q1, [x1] +; CHECK-GI-NEXT: sqdmull2 v0.2d, v0.4s, v1.4s +; CHECK-GI-NEXT: ret %load1 = load <4 x i32>, ptr %A %load2 = load <4 x i32>, ptr %B %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3> @@ -152,7 +204,7 @@ define <8 x i16> @pmull8h(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] -; CHECK-NEXT: pmull.8h v0, v0, v1 +; CHECK-NEXT: pmull v0.8h, v0.8b, v1.8b ; CHECK-NEXT: ret %tmp1 = load <8 x i8>, ptr %A %tmp2 = load <8 x i8>, ptr %B @@ -167,7 +219,7 @@ define <4 x i16> @sqdmulh_4h(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] -; CHECK-NEXT: sqdmulh.4h v0, v0, v1 +; CHECK-NEXT: sqdmulh v0.4h, v0.4h, v1.4h ; CHECK-NEXT: ret %tmp1 = load <4 x i16>, ptr %A %tmp2 = load <4 x i16>, ptr %B @@ -180,7 +232,7 @@ define <8 x i16> @sqdmulh_8h(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ldr q1, [x1] -; CHECK-NEXT: sqdmulh.8h v0, v0, v1 +; CHECK-NEXT: sqdmulh v0.8h, v0.8h, v1.8h ; CHECK-NEXT: ret %tmp1 = load <8 x i16>, ptr %A %tmp2 = load <8 x i16>, ptr %B @@ -193,7 +245,7 @@ define <2 x i32> @sqdmulh_2s(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] -; CHECK-NEXT: sqdmulh.2s v0, v0, v1 +; CHECK-NEXT: sqdmulh v0.2s, v0.2s, v1.2s ; CHECK-NEXT: ret %tmp1 = load <2 x i32>, ptr %A %tmp2 = load <2 x i32>, ptr %B @@ -206,7 +258,7 @@ define <4 x i32> @sqdmulh_4s(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ldr q1, [x1] -; CHECK-NEXT: sqdmulh.4s v0, v0, v1 +; CHECK-NEXT: sqdmulh v0.4s, v0.4s, v1.4s ; CHECK-NEXT: ret %tmp1 = load <4 x i32>, ptr %A %tmp2 = load <4 x i32>, ptr %B @@ -241,7 +293,7 @@ define <4 x i16> @sqrdmulh_4h(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] -; CHECK-NEXT: sqrdmulh.4h v0, v0, v1 +; CHECK-NEXT: sqrdmulh v0.4h, v0.4h, v1.4h ; CHECK-NEXT: ret %tmp1 = load <4 x i16>, ptr %A %tmp2 = load <4 x i16>, ptr %B @@ -254,7 +306,7 @@ define <8 x i16> @sqrdmulh_8h(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ldr q1, [x1] -; CHECK-NEXT: sqrdmulh.8h v0, v0, v1 +; CHECK-NEXT: sqrdmulh v0.8h, v0.8h, v1.8h ; CHECK-NEXT: ret %tmp1 = load <8 x i16>, ptr %A %tmp2 = load <8 x i16>, ptr %B @@ -267,7 +319,7 @@ define <2 x i32> @sqrdmulh_2s(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] -; CHECK-NEXT: sqrdmulh.2s v0, v0, v1 +; CHECK-NEXT: sqrdmulh v0.2s, v0.2s, v1.2s ; CHECK-NEXT: 
ret %tmp1 = load <2 x i32>, ptr %A %tmp2 = load <2 x i32>, ptr %B @@ -280,7 +332,7 @@ define <4 x i32> @sqrdmulh_4s(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ldr q1, [x1] -; CHECK-NEXT: sqrdmulh.4s v0, v0, v1 +; CHECK-NEXT: sqrdmulh v0.4s, v0.4s, v1.4s ; CHECK-NEXT: ret %tmp1 = load <4 x i32>, ptr %A %tmp2 = load <4 x i32>, ptr %B @@ -289,15 +341,23 @@ define <4 x i32> @sqrdmulh_4s(ptr %A, ptr %B) nounwind { } define i32 @sqrdmulh_1s(ptr %A, ptr %B) nounwind { -; CHECK-LABEL: sqrdmulh_1s: -; CHECK: // %bb.0: -; CHECK-NEXT: ldr w8, [x0] -; CHECK-NEXT: ldr w9, [x1] -; CHECK-NEXT: fmov s0, w8 -; CHECK-NEXT: fmov s1, w9 -; CHECK-NEXT: sqrdmulh s0, s0, s1 -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqrdmulh_1s: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ldr w8, [x0] +; CHECK-SD-NEXT: ldr w9, [x1] +; CHECK-SD-NEXT: fmov s0, w8 +; CHECK-SD-NEXT: fmov s1, w9 +; CHECK-SD-NEXT: sqrdmulh s0, s0, s1 +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqrdmulh_1s: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ldr s0, [x0] +; CHECK-GI-NEXT: ldr s1, [x1] +; CHECK-GI-NEXT: sqrdmulh s0, s0, s1 +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: ret %tmp1 = load i32, ptr %A %tmp2 = load i32, ptr %B %tmp3 = call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 %tmp1, i32 %tmp2) @@ -315,7 +375,7 @@ define <2 x float> @fmulx_2s(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ldr d1, [x1] -; CHECK-NEXT: fmulx.2s v0, v0, v1 +; CHECK-NEXT: fmulx v0.2s, v0.2s, v1.2s ; CHECK-NEXT: ret %tmp1 = load <2 x float>, ptr %A %tmp2 = load <2 x float>, ptr %B @@ -328,7 +388,7 @@ define <4 x float> @fmulx_4s(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ldr q1, [x1] -; CHECK-NEXT: fmulx.4s v0, v0, v1 +; CHECK-NEXT: fmulx v0.4s, v0.4s, v1.4s ; CHECK-NEXT: ret %tmp1 = load <4 x float>, ptr %A %tmp2 = load <4 x float>, ptr %B @@ -341,7 +401,7 @@ define <2 x double> @fmulx_2d(ptr %A, ptr %B) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ldr q1, [x1] -; CHECK-NEXT: fmulx.2d v0, v0, v1 +; CHECK-NEXT: fmulx v0.2d, v0.2d, v1.2d ; CHECK-NEXT: ret %tmp1 = load <2 x double>, ptr %A %tmp2 = load <2 x double>, ptr %B @@ -359,7 +419,7 @@ define <4 x i32> @smlal4s(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr d1, [x0] ; CHECK-NEXT: ldr d2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: smlal.4s v0, v1, v2 +; CHECK-NEXT: smlal v0.4s, v1.4h, v2.4h ; CHECK-NEXT: ret %tmp1 = load <4 x i16>, ptr %A %tmp2 = load <4 x i16>, ptr %B @@ -375,7 +435,7 @@ define <2 x i64> @smlal2d(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr d1, [x0] ; CHECK-NEXT: ldr d2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: smlal.2d v0, v1, v2 +; CHECK-NEXT: smlal v0.2d, v1.2s, v2.2s ; CHECK-NEXT: ret %tmp1 = load <2 x i32>, ptr %A %tmp2 = load <2 x i32>, ptr %B @@ -386,14 +446,24 @@ define <2 x i64> @smlal2d(ptr %A, ptr %B, ptr %C) nounwind { } define void @smlal8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) { -; CHECK-LABEL: smlal8h_chain_with_constant: -; CHECK: // %bb.0: -; CHECK-NEXT: movi.16b v3, #1 -; CHECK-NEXT: smlal.8h v3, v0, v2 -; CHECK-NEXT: mvn.8b v0, v2 -; CHECK-NEXT: smlal.8h v3, v1, v0 -; CHECK-NEXT: str q3, [x0] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: smlal8h_chain_with_constant: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: movi v3.16b, #1 +; CHECK-SD-NEXT: smlal v3.8h, v0.8b, v2.8b +; CHECK-SD-NEXT: mvn v0.8b, v2.8b +; CHECK-SD-NEXT: smlal v3.8h, 
v1.8b, v0.8b +; CHECK-SD-NEXT: str q3, [x0] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: smlal8h_chain_with_constant: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mvn v3.8b, v2.8b +; CHECK-GI-NEXT: smull v1.8h, v1.8b, v3.8b +; CHECK-GI-NEXT: movi v3.16b, #1 +; CHECK-GI-NEXT: smlal v1.8h, v0.8b, v2.8b +; CHECK-GI-NEXT: add v0.8h, v1.8h, v3.8h +; CHECK-GI-NEXT: str q0, [x0] +; CHECK-GI-NEXT: ret %xor = xor <8 x i8> %v3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> %smull.1 = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %v1, <8 x i8> %v3) %add.1 = add <8 x i16> %smull.1, <i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257> @@ -404,15 +474,26 @@ define void @smlal8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, < } define void @smlal2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) { -; CHECK-LABEL: smlal2d_chain_with_constant: -; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #257 // =0x101 -; CHECK-NEXT: dup.2d v3, x8 -; CHECK-NEXT: smlal.2d v3, v0, v2 -; CHECK-NEXT: mvn.8b v0, v2 -; CHECK-NEXT: smlal.2d v3, v1, v0 -; CHECK-NEXT: str q3, [x0] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: smlal2d_chain_with_constant: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov w8, #257 // =0x101 +; CHECK-SD-NEXT: dup v3.2d, x8 +; CHECK-SD-NEXT: smlal v3.2d, v0.2s, v2.2s +; CHECK-SD-NEXT: mvn v0.8b, v2.8b +; CHECK-SD-NEXT: smlal v3.2d, v1.2s, v0.2s +; CHECK-SD-NEXT: str q3, [x0] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: smlal2d_chain_with_constant: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mvn v3.8b, v2.8b +; CHECK-GI-NEXT: adrp x8, .LCPI27_0 +; CHECK-GI-NEXT: smull v1.2d, v1.2s, v3.2s +; CHECK-GI-NEXT: smlal v1.2d, v0.2s, v2.2s +; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI27_0] +; CHECK-GI-NEXT: add v0.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: str q0, [x0] +; CHECK-GI-NEXT: ret %xor = xor <2 x i32> %v3, <i32 -1, i32 -1> %smull.1 = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %v1, <2 x i32> %v3) %add.1 = add <2 x i64> %smull.1, <i64 257, i64 257> @@ -428,7 +509,7 @@ define <4 x i32> @smlsl4s(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr d1, [x0] ; CHECK-NEXT: ldr d2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: smlsl.4s v0, v1, v2 +; CHECK-NEXT: smlsl v0.4s, v1.4h, v2.4h ; CHECK-NEXT: ret %tmp1 = load <4 x i16>, ptr %A %tmp2 = load <4 x i16>, ptr %B @@ -444,7 +525,7 @@ define <2 x i64> @smlsl2d(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr d1, [x0] ; CHECK-NEXT: ldr d2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: smlsl.2d v0, v1, v2 +; CHECK-NEXT: smlsl v0.2d, v1.2s, v2.2s ; CHECK-NEXT: ret %tmp1 = load <2 x i32>, ptr %A %tmp2 = load <2 x i32>, ptr %B @@ -457,10 +538,10 @@ define <2 x i64> @smlsl2d(ptr %A, ptr %B, ptr %C) nounwind { define void @smlsl8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) { ; CHECK-LABEL: smlsl8h_chain_with_constant: ; CHECK: // %bb.0: -; CHECK-NEXT: movi.16b v3, #1 -; CHECK-NEXT: smlsl.8h v3, v0, v2 -; CHECK-NEXT: mvn.8b v0, v2 -; CHECK-NEXT: smlsl.8h v3, v1, v0 +; CHECK-NEXT: movi v3.16b, #1 +; CHECK-NEXT: smlsl v3.8h, v0.8b, v2.8b +; CHECK-NEXT: mvn v0.8b, v2.8b +; CHECK-NEXT: smlsl v3.8h, v1.8b, v0.8b ; CHECK-NEXT: str q3, [x0] ; CHECK-NEXT: ret %xor = xor <8 x i8> %v3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> @@ -473,15 +554,25 @@ define void @smlsl8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, < } define void @smlsl2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) { -; CHECK-LABEL: 
smlsl2d_chain_with_constant: -; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #257 // =0x101 -; CHECK-NEXT: dup.2d v3, x8 -; CHECK-NEXT: smlsl.2d v3, v0, v2 -; CHECK-NEXT: mvn.8b v0, v2 -; CHECK-NEXT: smlsl.2d v3, v1, v0 -; CHECK-NEXT: str q3, [x0] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: smlsl2d_chain_with_constant: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov w8, #257 // =0x101 +; CHECK-SD-NEXT: dup v3.2d, x8 +; CHECK-SD-NEXT: smlsl v3.2d, v0.2s, v2.2s +; CHECK-SD-NEXT: mvn v0.8b, v2.8b +; CHECK-SD-NEXT: smlsl v3.2d, v1.2s, v0.2s +; CHECK-SD-NEXT: str q3, [x0] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: smlsl2d_chain_with_constant: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: adrp x8, .LCPI31_0 +; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI31_0] +; CHECK-GI-NEXT: smlsl v3.2d, v0.2s, v2.2s +; CHECK-GI-NEXT: mvn v0.8b, v2.8b +; CHECK-GI-NEXT: smlsl v3.2d, v1.2s, v0.2s +; CHECK-GI-NEXT: str q3, [x0] +; CHECK-GI-NEXT: ret %xor = xor <2 x i32> %v3, <i32 -1, i32 -1> %smull.1 = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %v1, <2 x i32> %v3) %sub.1 = sub <2 x i64> <i64 257, i64 257>, %smull.1 @@ -502,7 +593,7 @@ define <4 x i32> @sqdmlal4s(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr d1, [x0] ; CHECK-NEXT: ldr d2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: sqdmlal.4s v0, v1, v2 +; CHECK-NEXT: sqdmlal v0.4s, v1.4h, v2.4h ; CHECK-NEXT: ret %tmp1 = load <4 x i16>, ptr %A %tmp2 = load <4 x i16>, ptr %B @@ -518,7 +609,7 @@ define <2 x i64> @sqdmlal2d(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr d1, [x0] ; CHECK-NEXT: ldr d2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: sqdmlal.2d v0, v1, v2 +; CHECK-NEXT: sqdmlal v0.2d, v1.2s, v2.2s ; CHECK-NEXT: ret %tmp1 = load <2 x i32>, ptr %A %tmp2 = load <2 x i32>, ptr %B @@ -529,13 +620,21 @@ define <2 x i64> @sqdmlal2d(ptr %A, ptr %B, ptr %C) nounwind { } define <4 x i32> @sqdmlal2_4s(ptr %A, ptr %B, ptr %C) nounwind { -; CHECK-LABEL: sqdmlal2_4s: -; CHECK: // %bb.0: -; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: ldr d1, [x0, #8] -; CHECK-NEXT: ldr d2, [x1, #8] -; CHECK-NEXT: sqdmlal.4s v0, v1, v2 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmlal2_4s: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ldr q0, [x2] +; CHECK-SD-NEXT: ldr d1, [x0, #8] +; CHECK-SD-NEXT: ldr d2, [x1, #8] +; CHECK-SD-NEXT: sqdmlal v0.4s, v1.4h, v2.4h +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmlal2_4s: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ldr q1, [x0] +; CHECK-GI-NEXT: ldr q2, [x1] +; CHECK-GI-NEXT: ldr q0, [x2] +; CHECK-GI-NEXT: sqdmlal2 v0.4s, v1.8h, v2.8h +; CHECK-GI-NEXT: ret %load1 = load <8 x i16>, ptr %A %load2 = load <8 x i16>, ptr %B %tmp3 = load <4 x i32>, ptr %C @@ -547,13 +646,21 @@ define <4 x i32> @sqdmlal2_4s(ptr %A, ptr %B, ptr %C) nounwind { } define <2 x i64> @sqdmlal2_2d(ptr %A, ptr %B, ptr %C) nounwind { -; CHECK-LABEL: sqdmlal2_2d: -; CHECK: // %bb.0: -; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: ldr d1, [x0, #8] -; CHECK-NEXT: ldr d2, [x1, #8] -; CHECK-NEXT: sqdmlal.2d v0, v1, v2 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmlal2_2d: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ldr q0, [x2] +; CHECK-SD-NEXT: ldr d1, [x0, #8] +; CHECK-SD-NEXT: ldr d2, [x1, #8] +; CHECK-SD-NEXT: sqdmlal v0.2d, v1.2s, v2.2s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmlal2_2d: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ldr q1, [x0] +; CHECK-GI-NEXT: ldr q2, [x1] +; CHECK-GI-NEXT: ldr q0, [x2] +; CHECK-GI-NEXT: sqdmlal2 v0.2d, v1.4s, v2.4s +; CHECK-GI-NEXT: ret %load1 = load <4 x i32>, ptr %A %load2 = load <4 x i32>, ptr %B %tmp3 = load <2 x i64>, ptr %C @@ -570,7 +677,7 @@ 
define <4 x i32> @sqdmlsl4s(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr d1, [x0] ; CHECK-NEXT: ldr d2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: sqdmlsl.4s v0, v1, v2 +; CHECK-NEXT: sqdmlsl v0.4s, v1.4h, v2.4h ; CHECK-NEXT: ret %tmp1 = load <4 x i16>, ptr %A %tmp2 = load <4 x i16>, ptr %B @@ -586,7 +693,7 @@ define <2 x i64> @sqdmlsl2d(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr d1, [x0] ; CHECK-NEXT: ldr d2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: sqdmlsl.2d v0, v1, v2 +; CHECK-NEXT: sqdmlsl v0.2d, v1.2s, v2.2s ; CHECK-NEXT: ret %tmp1 = load <2 x i32>, ptr %A %tmp2 = load <2 x i32>, ptr %B @@ -597,13 +704,21 @@ define <2 x i64> @sqdmlsl2d(ptr %A, ptr %B, ptr %C) nounwind { } define <4 x i32> @sqdmlsl2_4s(ptr %A, ptr %B, ptr %C) nounwind { -; CHECK-LABEL: sqdmlsl2_4s: -; CHECK: // %bb.0: -; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: ldr d1, [x0, #8] -; CHECK-NEXT: ldr d2, [x1, #8] -; CHECK-NEXT: sqdmlsl.4s v0, v1, v2 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmlsl2_4s: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ldr q0, [x2] +; CHECK-SD-NEXT: ldr d1, [x0, #8] +; CHECK-SD-NEXT: ldr d2, [x1, #8] +; CHECK-SD-NEXT: sqdmlsl v0.4s, v1.4h, v2.4h +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmlsl2_4s: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ldr q1, [x0] +; CHECK-GI-NEXT: ldr q2, [x1] +; CHECK-GI-NEXT: ldr q0, [x2] +; CHECK-GI-NEXT: sqdmlsl2 v0.4s, v1.8h, v2.8h +; CHECK-GI-NEXT: ret %load1 = load <8 x i16>, ptr %A %load2 = load <8 x i16>, ptr %B %tmp3 = load <4 x i32>, ptr %C @@ -615,13 +730,21 @@ define <4 x i32> @sqdmlsl2_4s(ptr %A, ptr %B, ptr %C) nounwind { } define <2 x i64> @sqdmlsl2_2d(ptr %A, ptr %B, ptr %C) nounwind { -; CHECK-LABEL: sqdmlsl2_2d: -; CHECK: // %bb.0: -; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: ldr d1, [x0, #8] -; CHECK-NEXT: ldr d2, [x1, #8] -; CHECK-NEXT: sqdmlsl.2d v0, v1, v2 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmlsl2_2d: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ldr q0, [x2] +; CHECK-SD-NEXT: ldr d1, [x0, #8] +; CHECK-SD-NEXT: ldr d2, [x1, #8] +; CHECK-SD-NEXT: sqdmlsl v0.2d, v1.2s, v2.2s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmlsl2_2d: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ldr q1, [x0] +; CHECK-GI-NEXT: ldr q2, [x1] +; CHECK-GI-NEXT: ldr q0, [x2] +; CHECK-GI-NEXT: sqdmlsl2 v0.2d, v1.4s, v2.4s +; CHECK-GI-NEXT: ret %load1 = load <4 x i32>, ptr %A %load2 = load <4 x i32>, ptr %B %tmp3 = load <2 x i64>, ptr %C @@ -638,7 +761,7 @@ define <4 x i32> @umlal4s(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr d1, [x0] ; CHECK-NEXT: ldr d2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: umlal.4s v0, v1, v2 +; CHECK-NEXT: umlal v0.4s, v1.4h, v2.4h ; CHECK-NEXT: ret %tmp1 = load <4 x i16>, ptr %A %tmp2 = load <4 x i16>, ptr %B @@ -654,7 +777,7 @@ define <2 x i64> @umlal2d(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr d1, [x0] ; CHECK-NEXT: ldr d2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: umlal.2d v0, v1, v2 +; CHECK-NEXT: umlal v0.2d, v1.2s, v2.2s ; CHECK-NEXT: ret %tmp1 = load <2 x i32>, ptr %A %tmp2 = load <2 x i32>, ptr %B @@ -665,14 +788,24 @@ define <2 x i64> @umlal2d(ptr %A, ptr %B, ptr %C) nounwind { } define void @umlal8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) { -; CHECK-LABEL: umlal8h_chain_with_constant: -; CHECK: // %bb.0: -; CHECK-NEXT: movi.16b v3, #1 -; CHECK-NEXT: umlal.8h v3, v0, v2 -; CHECK-NEXT: mvn.8b v0, v2 -; CHECK-NEXT: umlal.8h v3, v1, v0 -; CHECK-NEXT: str q3, [x0] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: umlal8h_chain_with_constant: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: 
movi v3.16b, #1 +; CHECK-SD-NEXT: umlal v3.8h, v0.8b, v2.8b +; CHECK-SD-NEXT: mvn v0.8b, v2.8b +; CHECK-SD-NEXT: umlal v3.8h, v1.8b, v0.8b +; CHECK-SD-NEXT: str q3, [x0] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: umlal8h_chain_with_constant: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mvn v3.8b, v2.8b +; CHECK-GI-NEXT: umull v1.8h, v1.8b, v3.8b +; CHECK-GI-NEXT: movi v3.16b, #1 +; CHECK-GI-NEXT: umlal v1.8h, v0.8b, v2.8b +; CHECK-GI-NEXT: add v0.8h, v1.8h, v3.8h +; CHECK-GI-NEXT: str q0, [x0] +; CHECK-GI-NEXT: ret %xor = xor <8 x i8> %v3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> %umull.1 = tail call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %v1, <8 x i8> %v3) %add.1 = add <8 x i16> %umull.1, <i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257> @@ -683,15 +816,26 @@ define void @umlal8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, < } define void @umlal2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) { -; CHECK-LABEL: umlal2d_chain_with_constant: -; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #257 // =0x101 -; CHECK-NEXT: dup.2d v3, x8 -; CHECK-NEXT: umlal.2d v3, v0, v2 -; CHECK-NEXT: mvn.8b v0, v2 -; CHECK-NEXT: umlal.2d v3, v1, v0 -; CHECK-NEXT: str q3, [x0] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: umlal2d_chain_with_constant: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov w8, #257 // =0x101 +; CHECK-SD-NEXT: dup v3.2d, x8 +; CHECK-SD-NEXT: umlal v3.2d, v0.2s, v2.2s +; CHECK-SD-NEXT: mvn v0.8b, v2.8b +; CHECK-SD-NEXT: umlal v3.2d, v1.2s, v0.2s +; CHECK-SD-NEXT: str q3, [x0] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: umlal2d_chain_with_constant: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mvn v3.8b, v2.8b +; CHECK-GI-NEXT: adrp x8, .LCPI43_0 +; CHECK-GI-NEXT: umull v1.2d, v1.2s, v3.2s +; CHECK-GI-NEXT: umlal v1.2d, v0.2s, v2.2s +; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI43_0] +; CHECK-GI-NEXT: add v0.2d, v1.2d, v0.2d +; CHECK-GI-NEXT: str q0, [x0] +; CHECK-GI-NEXT: ret %xor = xor <2 x i32> %v3, <i32 -1, i32 -1> %umull.1 = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %v1, <2 x i32> %v3) %add.1 = add <2 x i64> %umull.1, <i64 257, i64 257> @@ -707,7 +851,7 @@ define <4 x i32> @umlsl4s(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr d1, [x0] ; CHECK-NEXT: ldr d2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: umlsl.4s v0, v1, v2 +; CHECK-NEXT: umlsl v0.4s, v1.4h, v2.4h ; CHECK-NEXT: ret %tmp1 = load <4 x i16>, ptr %A %tmp2 = load <4 x i16>, ptr %B @@ -723,7 +867,7 @@ define <2 x i64> @umlsl2d(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr d1, [x0] ; CHECK-NEXT: ldr d2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: umlsl.2d v0, v1, v2 +; CHECK-NEXT: umlsl v0.2d, v1.2s, v2.2s ; CHECK-NEXT: ret %tmp1 = load <2 x i32>, ptr %A %tmp2 = load <2 x i32>, ptr %B @@ -736,10 +880,10 @@ define <2 x i64> @umlsl2d(ptr %A, ptr %B, ptr %C) nounwind { define void @umlsl8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) { ; CHECK-LABEL: umlsl8h_chain_with_constant: ; CHECK: // %bb.0: -; CHECK-NEXT: movi.16b v3, #1 -; CHECK-NEXT: umlsl.8h v3, v0, v2 -; CHECK-NEXT: mvn.8b v0, v2 -; CHECK-NEXT: umlsl.8h v3, v1, v0 +; CHECK-NEXT: movi v3.16b, #1 +; CHECK-NEXT: umlsl v3.8h, v0.8b, v2.8b +; CHECK-NEXT: mvn v0.8b, v2.8b +; CHECK-NEXT: umlsl v3.8h, v1.8b, v0.8b ; CHECK-NEXT: str q3, [x0] ; CHECK-NEXT: ret %xor = xor <8 x i8> %v3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> @@ -752,15 +896,25 @@ define void @umlsl8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, < } define 
void @umlsl2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) { -; CHECK-LABEL: umlsl2d_chain_with_constant: -; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #257 // =0x101 -; CHECK-NEXT: dup.2d v3, x8 -; CHECK-NEXT: umlsl.2d v3, v0, v2 -; CHECK-NEXT: mvn.8b v0, v2 -; CHECK-NEXT: umlsl.2d v3, v1, v0 -; CHECK-NEXT: str q3, [x0] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: umlsl2d_chain_with_constant: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov w8, #257 // =0x101 +; CHECK-SD-NEXT: dup v3.2d, x8 +; CHECK-SD-NEXT: umlsl v3.2d, v0.2s, v2.2s +; CHECK-SD-NEXT: mvn v0.8b, v2.8b +; CHECK-SD-NEXT: umlsl v3.2d, v1.2s, v0.2s +; CHECK-SD-NEXT: str q3, [x0] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: umlsl2d_chain_with_constant: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: adrp x8, .LCPI47_0 +; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI47_0] +; CHECK-GI-NEXT: umlsl v3.2d, v0.2s, v2.2s +; CHECK-GI-NEXT: mvn v0.8b, v2.8b +; CHECK-GI-NEXT: umlsl v3.2d, v1.2s, v0.2s +; CHECK-GI-NEXT: str q3, [x0] +; CHECK-GI-NEXT: ret %xor = xor <2 x i32> %v3, <i32 -1, i32 -1> %umull.1 = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %v1, <2 x i32> %v3) %add.1 = sub <2 x i64> <i64 257, i64 257>, %umull.1 @@ -776,7 +930,7 @@ define <2 x float> @fmla_2s(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr d1, [x0] ; CHECK-NEXT: ldr d2, [x1] ; CHECK-NEXT: ldr d0, [x2] -; CHECK-NEXT: fmla.2s v0, v2, v1 +; CHECK-NEXT: fmla v0.2s, v2.2s, v1.2s ; CHECK-NEXT: ret %tmp1 = load <2 x float>, ptr %A %tmp2 = load <2 x float>, ptr %B @@ -791,7 +945,7 @@ define <4 x float> @fmla_4s(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr q1, [x0] ; CHECK-NEXT: ldr q2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: fmla.4s v0, v2, v1 +; CHECK-NEXT: fmla v0.4s, v2.4s, v1.4s ; CHECK-NEXT: ret %tmp1 = load <4 x float>, ptr %A %tmp2 = load <4 x float>, ptr %B @@ -806,7 +960,7 @@ define <2 x double> @fmla_2d(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr q1, [x0] ; CHECK-NEXT: ldr q2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: fmla.2d v0, v2, v1 +; CHECK-NEXT: fmla v0.2d, v2.2d, v1.2d ; CHECK-NEXT: ret %tmp1 = load <2 x double>, ptr %A %tmp2 = load <2 x double>, ptr %B @@ -825,7 +979,7 @@ define <2 x float> @fmls_2s(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr d1, [x0] ; CHECK-NEXT: ldr d2, [x1] ; CHECK-NEXT: ldr d0, [x2] -; CHECK-NEXT: fmls.2s v0, v1, v2 +; CHECK-NEXT: fmls v0.2s, v1.2s, v2.2s ; CHECK-NEXT: ret %tmp1 = load <2 x float>, ptr %A %tmp2 = load <2 x float>, ptr %B @@ -841,7 +995,7 @@ define <4 x float> @fmls_4s(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr q1, [x0] ; CHECK-NEXT: ldr q2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: fmls.4s v0, v1, v2 +; CHECK-NEXT: fmls v0.4s, v1.4s, v2.4s ; CHECK-NEXT: ret %tmp1 = load <4 x float>, ptr %A %tmp2 = load <4 x float>, ptr %B @@ -857,7 +1011,7 @@ define <2 x double> @fmls_2d(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr q1, [x0] ; CHECK-NEXT: ldr q2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: fmls.2d v0, v1, v2 +; CHECK-NEXT: fmls v0.2d, v1.2d, v2.2d ; CHECK-NEXT: ret %tmp1 = load <2 x double>, ptr %A %tmp2 = load <2 x double>, ptr %B @@ -873,7 +1027,7 @@ define <2 x float> @fmls_commuted_neg_2s(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr d1, [x0] ; CHECK-NEXT: ldr d2, [x1] ; CHECK-NEXT: ldr d0, [x2] -; CHECK-NEXT: fmls.2s v0, v1, v2 +; CHECK-NEXT: fmls v0.2s, v1.2s, v2.2s ; CHECK-NEXT: ret %tmp1 = load <2 x float>, ptr %A %tmp2 = load <2 x float>, ptr %B @@ -889,7 +1043,7 @@ define <4 x float> @fmls_commuted_neg_4s(ptr 
%A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr q1, [x0] ; CHECK-NEXT: ldr q2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: fmls.4s v0, v1, v2 +; CHECK-NEXT: fmls v0.4s, v1.4s, v2.4s ; CHECK-NEXT: ret %tmp1 = load <4 x float>, ptr %A %tmp2 = load <4 x float>, ptr %B @@ -905,7 +1059,7 @@ define <2 x double> @fmls_commuted_neg_2d(ptr %A, ptr %B, ptr %C) nounwind { ; CHECK-NEXT: ldr q1, [x0] ; CHECK-NEXT: ldr q2, [x1] ; CHECK-NEXT: ldr q0, [x2] -; CHECK-NEXT: fmls.2d v0, v1, v2 +; CHECK-NEXT: fmls v0.2d, v1.2d, v2.2d ; CHECK-NEXT: ret %tmp1 = load <2 x double>, ptr %A %tmp2 = load <2 x double>, ptr %B @@ -919,7 +1073,7 @@ define <2 x float> @fmls_indexed_2s(<2 x float> %a, <2 x float> %b, <2 x float> ; CHECK-LABEL: fmls_indexed_2s: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: fmls.2s v0, v2, v1[0] +; CHECK-NEXT: fmls v0.2s, v2.2s, v1.s[0] ; CHECK-NEXT: ret entry: %0 = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %c @@ -931,7 +1085,7 @@ entry: define <4 x float> @fmls_indexed_4s(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind readnone ssp { ; CHECK-LABEL: fmls_indexed_4s: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: fmls.4s v0, v2, v1[0] +; CHECK-NEXT: fmls v0.4s, v2.4s, v1.s[0] ; CHECK-NEXT: ret entry: %0 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c @@ -943,7 +1097,7 @@ entry: define <2 x double> @fmls_indexed_2d(<2 x double> %a, <2 x double> %b, <2 x double> %c) nounwind readnone ssp { ; CHECK-LABEL: fmls_indexed_2d: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: fmls.2d v0, v2, v1[0] +; CHECK-NEXT: fmls v0.2d, v2.2d, v1.d[0] ; CHECK-NEXT: ret entry: %0 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %c @@ -956,7 +1110,7 @@ define <2 x float> @fmla_indexed_scalar_2s(<2 x float> %a, <2 x float> %b, float ; CHECK-LABEL: fmla_indexed_scalar_2s: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: // kill: def $s2 killed $s2 def $d2 -; CHECK-NEXT: fmla.2s v0, v1, v2 +; CHECK-NEXT: fmla v0.2s, v1.2s, v2.2s ; CHECK-NEXT: ret entry: %v1 = insertelement <2 x float> undef, float %c, i32 0 @@ -969,7 +1123,7 @@ define <4 x float> @fmla_indexed_scalar_4s(<4 x float> %a, <4 x float> %b, float ; CHECK-LABEL: fmla_indexed_scalar_4s: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: // kill: def $s2 killed $s2 def $q2 -; CHECK-NEXT: fmla.4s v0, v1, v2[0] +; CHECK-NEXT: fmla v0.4s, v1.4s, v2.s[0] ; CHECK-NEXT: ret entry: %v1 = insertelement <4 x float> undef, float %c, i32 0 @@ -984,7 +1138,7 @@ define <2 x double> @fmla_indexed_scalar_2d(<2 x double> %a, <2 x double> %b, do ; CHECK-LABEL: fmla_indexed_scalar_2d: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: fmla.2d v0, v1, v2[0] +; CHECK-NEXT: fmla v0.2d, v1.2d, v2.d[0] ; CHECK-NEXT: ret entry: %v1 = insertelement <2 x double> undef, double %c, i32 0 @@ -997,7 +1151,7 @@ define <2 x float> @fmls_indexed_2s_strict(<2 x float> %a, <2 x float> %b, <2 x ; CHECK-LABEL: fmls_indexed_2s_strict: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: fmls.2s v0, v2, v1[0] +; CHECK-NEXT: fmls v0.2s, v2.2s, v1.s[0] ; CHECK-NEXT: ret entry: %0 = fneg <2 x float> %c @@ -1009,7 +1163,7 @@ entry: define <4 x float> @fmls_indexed_4s_strict(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind readnone ssp strictfp { ; CHECK-LABEL: fmls_indexed_4s_strict: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: fmls.4s v0, v2, v1[0] +; CHECK-NEXT: fmls v0.4s, 
v2.4s, v1.s[0] ; CHECK-NEXT: ret entry: %0 = fneg <4 x float> %c @@ -1021,7 +1175,7 @@ entry: define <2 x double> @fmls_indexed_2d_strict(<2 x double> %a, <2 x double> %b, <2 x double> %c) nounwind readnone ssp strictfp { ; CHECK-LABEL: fmls_indexed_2d_strict: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: fmls.2d v0, v2, v1[0] +; CHECK-NEXT: fmls v0.2d, v2.2d, v1.d[0] ; CHECK-NEXT: ret entry: %0 = fneg <2 x double> %c @@ -1034,7 +1188,7 @@ define <2 x float> @fmla_indexed_scalar_2s_strict(<2 x float> %a, <2 x float> %b ; CHECK-LABEL: fmla_indexed_scalar_2s_strict: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: // kill: def $s2 killed $s2 def $q2 -; CHECK-NEXT: fmla.2s v0, v1, v2[0] +; CHECK-NEXT: fmla v0.2s, v1.2s, v2.s[0] ; CHECK-NEXT: ret entry: %v1 = insertelement <2 x float> undef, float %c, i32 0 @@ -1047,7 +1201,7 @@ define <4 x float> @fmla_indexed_scalar_4s_strict(<4 x float> %a, <4 x float> %b ; CHECK-LABEL: fmla_indexed_scalar_4s_strict: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: // kill: def $s2 killed $s2 def $q2 -; CHECK-NEXT: fmla.4s v0, v1, v2[0] +; CHECK-NEXT: fmla v0.4s, v1.4s, v2.s[0] ; CHECK-NEXT: ret entry: %v1 = insertelement <4 x float> undef, float %c, i32 0 @@ -1062,7 +1216,7 @@ define <2 x double> @fmla_indexed_scalar_2d_strict(<2 x double> %a, <2 x double> ; CHECK-LABEL: fmla_indexed_scalar_2d_strict: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: fmla.2d v0, v1, v2[0] +; CHECK-NEXT: fmla v0.2d, v1.2d, v2.d[0] ; CHECK-NEXT: ret entry: %v1 = insertelement <2 x double> undef, double %c, i32 0 @@ -1081,7 +1235,7 @@ define <4 x i16> @mul_4h(<4 x i16> %A, <4 x i16> %B) nounwind { ; CHECK-LABEL: mul_4h: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: mul.4h v0, v0, v1[1] +; CHECK-NEXT: mul v0.4h, v0.4h, v1.h[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp4 = mul <4 x i16> %A, %tmp3 @@ -1091,7 +1245,7 @@ define <4 x i16> @mul_4h(<4 x i16> %A, <4 x i16> %B) nounwind { define <8 x i16> @mul_8h(<8 x i16> %A, <8 x i16> %B) nounwind { ; CHECK-LABEL: mul_8h: ; CHECK: // %bb.0: -; CHECK-NEXT: mul.8h v0, v0, v1[1] +; CHECK-NEXT: mul v0.8h, v0.8h, v1.h[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <8 x i16> %B, <8 x i16> poison, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> %tmp4 = mul <8 x i16> %A, %tmp3 @@ -1102,7 +1256,7 @@ define <2 x i32> @mul_2s(<2 x i32> %A, <2 x i32> %B) nounwind { ; CHECK-LABEL: mul_2s: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: mul.2s v0, v0, v1[1] +; CHECK-NEXT: mul v0.2s, v0.2s, v1.s[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1> %tmp4 = mul <2 x i32> %A, %tmp3 @@ -1112,7 +1266,7 @@ define <2 x i32> @mul_2s(<2 x i32> %A, <2 x i32> %B) nounwind { define <4 x i32> @mul_4s(<4 x i32> %A, <4 x i32> %B) nounwind { ; CHECK-LABEL: mul_4s: ; CHECK: // %bb.0: -; CHECK-NEXT: mul.4s v0, v0, v1[1] +; CHECK-NEXT: mul v0.4s, v0.4s, v1.s[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <4 x i32> %B, <4 x i32> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp4 = mul <4 x i32> %A, %tmp3 @@ -1120,17 +1274,29 @@ define <4 x i32> @mul_4s(<4 x i32> %A, <4 x i32> %B) nounwind { } define <2 x i64> @mul_2d(<2 x i64> %A, <2 x i64> %B) nounwind { -; CHECK-LABEL: mul_2d: -; CHECK: // %bb.0: -; CHECK-NEXT: fmov x10, d1 -; CHECK-NEXT: fmov x11, d0 -; CHECK-NEXT: mov.d x8, v1[1] -; CHECK-NEXT: mov.d x9, v0[1] -; CHECK-NEXT: mul 
x10, x11, x10 -; CHECK-NEXT: mul x8, x9, x8 -; CHECK-NEXT: fmov d0, x10 -; CHECK-NEXT: mov.d v0[1], x8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: mul_2d: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fmov x10, d1 +; CHECK-SD-NEXT: fmov x11, d0 +; CHECK-SD-NEXT: mov x8, v1.d[1] +; CHECK-SD-NEXT: mov x9, v0.d[1] +; CHECK-SD-NEXT: mul x10, x11, x10 +; CHECK-SD-NEXT: mul x8, x9, x8 +; CHECK-SD-NEXT: fmov d0, x10 +; CHECK-SD-NEXT: mov v0.d[1], x8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mul_2d: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fmov x10, d0 +; CHECK-GI-NEXT: fmov x11, d1 +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov x9, v1.d[1] +; CHECK-GI-NEXT: mul x10, x10, x11 +; CHECK-GI-NEXT: mul x8, x8, x9 +; CHECK-GI-NEXT: fmov d0, x10 +; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: ret %tmp1 = mul <2 x i64> %A, %B ret <2 x i64> %tmp1 } @@ -1139,7 +1305,7 @@ define <2 x float> @fmul_lane_2s(<2 x float> %A, <2 x float> %B) nounwind { ; CHECK-LABEL: fmul_lane_2s: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: fmul.2s v0, v0, v1[1] +; CHECK-NEXT: fmul v0.2s, v0.2s, v1.s[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <2 x float> %B, <2 x float> poison, <2 x i32> <i32 1, i32 1> %tmp4 = fmul <2 x float> %A, %tmp3 @@ -1149,7 +1315,7 @@ define <2 x float> @fmul_lane_2s(<2 x float> %A, <2 x float> %B) nounwind { define <4 x float> @fmul_lane_4s(<4 x float> %A, <4 x float> %B) nounwind { ; CHECK-LABEL: fmul_lane_4s: ; CHECK: // %bb.0: -; CHECK-NEXT: fmul.4s v0, v0, v1[1] +; CHECK-NEXT: fmul v0.4s, v0.4s, v1.s[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <4 x float> %B, <4 x float> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp4 = fmul <4 x float> %A, %tmp3 @@ -1159,7 +1325,7 @@ define <4 x float> @fmul_lane_4s(<4 x float> %A, <4 x float> %B) nounwind { define <2 x double> @fmul_lane_2d(<2 x double> %A, <2 x double> %B) nounwind { ; CHECK-LABEL: fmul_lane_2d: ; CHECK: // %bb.0: -; CHECK-NEXT: fmul.2d v0, v0, v1[1] +; CHECK-NEXT: fmul v0.2d, v0.2d, v1.d[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <2 x double> %B, <2 x double> poison, <2 x i32> <i32 1, i32 1> %tmp4 = fmul <2 x double> %A, %tmp3 @@ -1169,7 +1335,7 @@ define <2 x double> @fmul_lane_2d(<2 x double> %A, <2 x double> %B) nounwind { define float @fmul_lane_s(float %A, <4 x float> %vec) nounwind { ; CHECK-LABEL: fmul_lane_s: ; CHECK: // %bb.0: -; CHECK-NEXT: fmul.s s0, s0, v1[3] +; CHECK-NEXT: fmul s0, s0, v1.s[3] ; CHECK-NEXT: ret %B = extractelement <4 x float> %vec, i32 3 %res = fmul float %A, %B @@ -1179,7 +1345,7 @@ define float @fmul_lane_s(float %A, <4 x float> %vec) nounwind { define double @fmul_lane_d(double %A, <2 x double> %vec) nounwind { ; CHECK-LABEL: fmul_lane_d: ; CHECK: // %bb.0: -; CHECK-NEXT: fmul.d d0, d0, v1[1] +; CHECK-NEXT: fmul d0, d0, v1.d[1] ; CHECK-NEXT: ret %B = extractelement <2 x double> %vec, i32 1 %res = fmul double %A, %B @@ -1192,7 +1358,7 @@ define <2 x float> @fmulx_lane_2s(<2 x float> %A, <2 x float> %B) nounwind { ; CHECK-LABEL: fmulx_lane_2s: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: fmulx.2s v0, v0, v1[1] +; CHECK-NEXT: fmulx v0.2s, v0.2s, v1.s[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <2 x float> %B, <2 x float> poison, <2 x i32> <i32 1, i32 1> %tmp4 = call <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float> %A, <2 x float> %tmp3) @@ -1202,7 +1368,7 @@ define <2 x float> @fmulx_lane_2s(<2 x float> %A, <2 x float> %B) nounwind { define <4 x float> @fmulx_lane_4s(<4 x float> %A, <4 x float> %B) nounwind { ; CHECK-LABEL: 
fmulx_lane_4s: ; CHECK: // %bb.0: -; CHECK-NEXT: fmulx.4s v0, v0, v1[1] +; CHECK-NEXT: fmulx v0.4s, v0.4s, v1.s[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <4 x float> %B, <4 x float> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp4 = call <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float> %A, <4 x float> %tmp3) @@ -1212,7 +1378,7 @@ define <4 x float> @fmulx_lane_4s(<4 x float> %A, <4 x float> %B) nounwind { define <2 x double> @fmulx_lane_2d(<2 x double> %A, <2 x double> %B) nounwind { ; CHECK-LABEL: fmulx_lane_2d: ; CHECK: // %bb.0: -; CHECK-NEXT: fmulx.2d v0, v0, v1[1] +; CHECK-NEXT: fmulx v0.2d, v0.2d, v1.d[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <2 x double> %B, <2 x double> poison, <2 x i32> <i32 1, i32 1> %tmp4 = call <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double> %A, <2 x double> %tmp3) @@ -1223,7 +1389,7 @@ define <4 x i16> @sqdmulh_lane_4h(<4 x i16> %A, <4 x i16> %B) nounwind { ; CHECK-LABEL: sqdmulh_lane_4h: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: sqdmulh.4h v0, v0, v1[1] +; CHECK-NEXT: sqdmulh v0.4h, v0.4h, v1.h[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp4 = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> %A, <4 x i16> %tmp3) @@ -1233,7 +1399,7 @@ define <4 x i16> @sqdmulh_lane_4h(<4 x i16> %A, <4 x i16> %B) nounwind { define <8 x i16> @sqdmulh_lane_8h(<8 x i16> %A, <8 x i16> %B) nounwind { ; CHECK-LABEL: sqdmulh_lane_8h: ; CHECK: // %bb.0: -; CHECK-NEXT: sqdmulh.8h v0, v0, v1[1] +; CHECK-NEXT: sqdmulh v0.8h, v0.8h, v1.h[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <8 x i16> %B, <8 x i16> poison, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> %tmp4 = call <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16> %A, <8 x i16> %tmp3) @@ -1244,7 +1410,7 @@ define <2 x i32> @sqdmulh_lane_2s(<2 x i32> %A, <2 x i32> %B) nounwind { ; CHECK-LABEL: sqdmulh_lane_2s: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: sqdmulh.2s v0, v0, v1[1] +; CHECK-NEXT: sqdmulh v0.2s, v0.2s, v1.s[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1> %tmp4 = call <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32> %A, <2 x i32> %tmp3) @@ -1254,7 +1420,7 @@ define <2 x i32> @sqdmulh_lane_2s(<2 x i32> %A, <2 x i32> %B) nounwind { define <4 x i32> @sqdmulh_lane_4s(<4 x i32> %A, <4 x i32> %B) nounwind { ; CHECK-LABEL: sqdmulh_lane_4s: ; CHECK: // %bb.0: -; CHECK-NEXT: sqdmulh.4s v0, v0, v1[1] +; CHECK-NEXT: sqdmulh v0.4s, v0.4s, v1.s[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <4 x i32> %B, <4 x i32> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32> %A, <4 x i32> %tmp3) @@ -1265,7 +1431,7 @@ define i32 @sqdmulh_lane_1s(i32 %A, <4 x i32> %B) nounwind { ; CHECK-LABEL: sqdmulh_lane_1s: ; CHECK: // %bb.0: ; CHECK-NEXT: fmov s1, w0 -; CHECK-NEXT: sqdmulh.s s0, s1, v0[1] +; CHECK-NEXT: sqdmulh s0, s1, v0.s[1] ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %tmp1 = extractelement <4 x i32> %B, i32 1 @@ -1277,7 +1443,7 @@ define <4 x i16> @sqrdmulh_lane_4h(<4 x i16> %A, <4 x i16> %B) nounwind { ; CHECK-LABEL: sqrdmulh_lane_4h: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: sqrdmulh.4h v0, v0, v1[1] +; CHECK-NEXT: sqrdmulh v0.4h, v0.4h, v1.h[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp4 = call <4 x i16> 
@llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> %A, <4 x i16> %tmp3) @@ -1287,7 +1453,7 @@ define <4 x i16> @sqrdmulh_lane_4h(<4 x i16> %A, <4 x i16> %B) nounwind { define <8 x i16> @sqrdmulh_lane_8h(<8 x i16> %A, <8 x i16> %B) nounwind { ; CHECK-LABEL: sqrdmulh_lane_8h: ; CHECK: // %bb.0: -; CHECK-NEXT: sqrdmulh.8h v0, v0, v1[1] +; CHECK-NEXT: sqrdmulh v0.8h, v0.8h, v1.h[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <8 x i16> %B, <8 x i16> poison, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> %tmp4 = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %A, <8 x i16> %tmp3) @@ -1298,7 +1464,7 @@ define <2 x i32> @sqrdmulh_lane_2s(<2 x i32> %A, <2 x i32> %B) nounwind { ; CHECK-LABEL: sqrdmulh_lane_2s: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: sqrdmulh.2s v0, v0, v1[1] +; CHECK-NEXT: sqrdmulh v0.2s, v0.2s, v1.s[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1> %tmp4 = call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> %A, <2 x i32> %tmp3) @@ -1308,7 +1474,7 @@ define <2 x i32> @sqrdmulh_lane_2s(<2 x i32> %A, <2 x i32> %B) nounwind { define <4 x i32> @sqrdmulh_lane_4s(<4 x i32> %A, <4 x i32> %B) nounwind { ; CHECK-LABEL: sqrdmulh_lane_4s: ; CHECK: // %bb.0: -; CHECK-NEXT: sqrdmulh.4s v0, v0, v1[1] +; CHECK-NEXT: sqrdmulh v0.4s, v0.4s, v1.s[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <4 x i32> %B, <4 x i32> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> %A, <4 x i32> %tmp3) @@ -1319,7 +1485,7 @@ define i32 @sqrdmulh_lane_1s(i32 %A, <4 x i32> %B) nounwind { ; CHECK-LABEL: sqrdmulh_lane_1s: ; CHECK: // %bb.0: ; CHECK-NEXT: fmov s1, w0 -; CHECK-NEXT: sqrdmulh.s s0, s1, v0[1] +; CHECK-NEXT: sqrdmulh s0, s1, v0.s[1] ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %tmp1 = extractelement <4 x i32> %B, i32 1 @@ -1331,7 +1497,7 @@ define <4 x i32> @sqdmull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind { ; CHECK-LABEL: sqdmull_lane_4s: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: sqdmull.4s v0, v0, v1[1] +; CHECK-NEXT: sqdmull v0.4s, v0.4h, v1.h[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %A, <4 x i16> %tmp3) @@ -1342,7 +1508,7 @@ define <2 x i64> @sqdmull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind { ; CHECK-LABEL: sqdmull_lane_2d: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: sqdmull.2d v0, v0, v1[1] +; CHECK-NEXT: sqdmull v0.2d, v0.2s, v1.s[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1> %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %A, <2 x i32> %tmp3) @@ -1350,10 +1516,16 @@ define <2 x i64> @sqdmull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind { } define <4 x i32> @sqdmull2_lane_4s(<8 x i16> %A, <8 x i16> %B) nounwind { -; CHECK-LABEL: sqdmull2_lane_4s: -; CHECK: // %bb.0: -; CHECK-NEXT: sqdmull2.4s v0, v0, v1[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmull2_lane_4s: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sqdmull2 v0.4s, v0.8h, v1.h[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmull2_lane_4s: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d0, v0.d[1] +; CHECK-GI-NEXT: sqdmull v0.4s, v0.4h, v1.h[1] +; CHECK-GI-NEXT: ret %tmp1 = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> %tmp2 = shufflevector <8 
x i16> %B, <8 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2) @@ -1361,10 +1533,16 @@ define <4 x i32> @sqdmull2_lane_4s(<8 x i16> %A, <8 x i16> %B) nounwind { } define <2 x i64> @sqdmull2_lane_2d(<4 x i32> %A, <4 x i32> %B) nounwind { -; CHECK-LABEL: sqdmull2_lane_2d: -; CHECK: // %bb.0: -; CHECK-NEXT: sqdmull2.2d v0, v0, v1[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmull2_lane_2d: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sqdmull2 v0.2d, v0.4s, v1.s[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmull2_lane_2d: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d0, v0.d[1] +; CHECK-GI-NEXT: sqdmull v0.2d, v0.2s, v1.s[1] +; CHECK-GI-NEXT: ret %tmp1 = shufflevector <4 x i32> %A, <4 x i32> undef, <2 x i32> <i32 2, i32 3> %tmp2 = shufflevector <4 x i32> %B, <4 x i32> undef, <2 x i32> <i32 1, i32 1> %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2) @@ -1375,7 +1553,7 @@ define <4 x i32> @umull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind { ; CHECK-LABEL: umull_lane_4s: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: umull.4s v0, v0, v1[1] +; CHECK-NEXT: umull v0.4s, v0.4h, v1.h[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp4 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %A, <4 x i16> %tmp3) @@ -1386,7 +1564,7 @@ define <2 x i64> @umull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind { ; CHECK-LABEL: umull_lane_2d: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: umull.2d v0, v0, v1[1] +; CHECK-NEXT: umull v0.2d, v0.2s, v1.s[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1> %tmp4 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %A, <2 x i32> %tmp3) @@ -1397,7 +1575,7 @@ define <4 x i32> @smull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind { ; CHECK-LABEL: smull_lane_4s: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: smull.4s v0, v0, v1[1] +; CHECK-NEXT: smull v0.4s, v0.4h, v1.h[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp4 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %A, <4 x i16> %tmp3) @@ -1408,7 +1586,7 @@ define <2 x i64> @smull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind { ; CHECK-LABEL: smull_lane_2d: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: smull.2d v0, v0, v1[1] +; CHECK-NEXT: smull v0.2d, v0.2s, v1.s[1] ; CHECK-NEXT: ret %tmp3 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1> %tmp4 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %A, <2 x i32> %tmp3) @@ -1419,8 +1597,8 @@ define <4 x i32> @smlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwi ; CHECK-LABEL: smlal_lane_4s: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: smlal.4s v2, v0, v1[1] -; CHECK-NEXT: mov.16b v0, v2 +; CHECK-NEXT: smlal v2.4s, v0.4h, v1.h[1] +; CHECK-NEXT: mov v0.16b, v2.16b ; CHECK-NEXT: ret %tmp4 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp5 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %A, <4 x i16> %tmp4) @@ -1432,8 +1610,8 @@ define <2 x i64> @smlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwi ; CHECK-LABEL: smlal_lane_2d: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: 
def $d1 killed $d1 def $q1 -; CHECK-NEXT: smlal.2d v2, v0, v1[1] -; CHECK-NEXT: mov.16b v0, v2 +; CHECK-NEXT: smlal v2.2d, v0.2s, v1.s[1] +; CHECK-NEXT: mov v0.16b, v2.16b ; CHECK-NEXT: ret %tmp4 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1> %tmp5 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %A, <2 x i32> %tmp4) @@ -1445,8 +1623,8 @@ define <4 x i32> @sqdmlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) noun ; CHECK-LABEL: sqdmlal_lane_4s: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: sqdmlal.4s v2, v0, v1[1] -; CHECK-NEXT: mov.16b v0, v2 +; CHECK-NEXT: sqdmlal v2.4s, v0.4h, v1.h[1] +; CHECK-NEXT: mov v0.16b, v2.16b ; CHECK-NEXT: ret %tmp4 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %A, <4 x i16> %tmp4) @@ -1458,8 +1636,8 @@ define <2 x i64> @sqdmlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) noun ; CHECK-LABEL: sqdmlal_lane_2d: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: sqdmlal.2d v2, v0, v1[1] -; CHECK-NEXT: mov.16b v0, v2 +; CHECK-NEXT: sqdmlal v2.2d, v0.2s, v1.s[1] +; CHECK-NEXT: mov v0.16b, v2.16b ; CHECK-NEXT: ret %tmp4 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1> %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %A, <2 x i32> %tmp4) @@ -1468,11 +1646,18 @@ define <2 x i64> @sqdmlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) noun } define <4 x i32> @sqdmlal2_lane_4s(<8 x i16> %A, <8 x i16> %B, <4 x i32> %C) nounwind { -; CHECK-LABEL: sqdmlal2_lane_4s: -; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlal2.4s v2, v0, v1[1] -; CHECK-NEXT: mov.16b v0, v2 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmlal2_lane_4s: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sqdmlal2 v2.4s, v0.8h, v1.h[1] +; CHECK-SD-NEXT: mov v0.16b, v2.16b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmlal2_lane_4s: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d3, v0.d[1] +; CHECK-GI-NEXT: mov v0.16b, v2.16b +; CHECK-GI-NEXT: sqdmlal v0.4s, v3.4h, v1.h[1] +; CHECK-GI-NEXT: ret %tmp1 = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> %tmp2 = shufflevector <8 x i16> %B, <8 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2) @@ -1481,11 +1666,18 @@ define <4 x i32> @sqdmlal2_lane_4s(<8 x i16> %A, <8 x i16> %B, <4 x i32> %C) nou } define <2 x i64> @sqdmlal2_lane_2d(<4 x i32> %A, <4 x i32> %B, <2 x i64> %C) nounwind { -; CHECK-LABEL: sqdmlal2_lane_2d: -; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlal2.2d v2, v0, v1[1] -; CHECK-NEXT: mov.16b v0, v2 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmlal2_lane_2d: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sqdmlal2 v2.2d, v0.4s, v1.s[1] +; CHECK-SD-NEXT: mov v0.16b, v2.16b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmlal2_lane_2d: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d3, v0.d[1] +; CHECK-GI-NEXT: mov v0.16b, v2.16b +; CHECK-GI-NEXT: sqdmlal v0.2d, v3.2s, v1.s[1] +; CHECK-GI-NEXT: ret %tmp1 = shufflevector <4 x i32> %A, <4 x i32> undef, <2 x i32> <i32 2, i32 3> %tmp2 = shufflevector <4 x i32> %B, <4 x i32> undef, <2 x i32> <i32 1, i32 1> %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2) @@ -1499,7 +1691,7 @@ define i32 @sqdmlal_lane_1s(i32 %A, i16 %B, <4 x i16> %C) nounwind { ; CHECK-NEXT: fmov s1, w1 ; CHECK-NEXT: fmov s2, w0 ; CHECK-NEXT: // kill: def 
$d0 killed $d0 def $q0 -; CHECK-NEXT: sqdmlal.h s2, h1, v0[1] +; CHECK-NEXT: sqdmlal s2, h1, v0.h[1] ; CHECK-NEXT: fmov w0, s2 ; CHECK-NEXT: ret %lhs = insertelement <4 x i16> undef, i16 %B, i32 0 @@ -1517,7 +1709,7 @@ define i32 @sqdmlsl_lane_1s(i32 %A, i16 %B, <4 x i16> %C) nounwind { ; CHECK-NEXT: fmov s1, w1 ; CHECK-NEXT: fmov s2, w0 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: sqdmlsl.h s2, h1, v0[1] +; CHECK-NEXT: sqdmlsl s2, h1, v0.h[1] ; CHECK-NEXT: fmov w0, s2 ; CHECK-NEXT: ret %lhs = insertelement <4 x i16> undef, i16 %B, i32 0 @@ -1530,15 +1722,24 @@ define i32 @sqdmlsl_lane_1s(i32 %A, i16 %B, <4 x i16> %C) nounwind { declare i32 @llvm.aarch64.neon.sqsub.i32(i32, i32) define i32 @sqadd_lane1_sqdmull4s(i32 %A, <4 x i16> %B, <4 x i16> %C) nounwind { -; CHECK-LABEL: sqadd_lane1_sqdmull4s: -; CHECK: // %bb.0: -; CHECK-NEXT: sqdmull.4s v0, v0, v1 -; CHECK-NEXT: mov.s w8, v0[1] -; CHECK-NEXT: fmov s0, w0 -; CHECK-NEXT: fmov s1, w8 -; CHECK-NEXT: sqadd s0, s0, s1 -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqadd_lane1_sqdmull4s: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sqdmull v0.4s, v0.4h, v1.4h +; CHECK-SD-NEXT: mov w8, v0.s[1] +; CHECK-SD-NEXT: fmov s0, w0 +; CHECK-SD-NEXT: fmov s1, w8 +; CHECK-SD-NEXT: sqadd s0, s0, s1 +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqadd_lane1_sqdmull4s: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sqdmull v0.4s, v0.4h, v1.4h +; CHECK-GI-NEXT: fmov s1, w0 +; CHECK-GI-NEXT: mov s0, v0.s[1] +; CHECK-GI-NEXT: sqadd s0, s1, s0 +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: ret %prod.vec = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %B, <4 x i16> %C) %prod = extractelement <4 x i32> %prod.vec, i32 1 %res = call i32 @llvm.aarch64.neon.sqadd.i32(i32 %A, i32 %prod) @@ -1546,15 +1747,24 @@ define i32 @sqadd_lane1_sqdmull4s(i32 %A, <4 x i16> %B, <4 x i16> %C) nounwind { } define i32 @sqsub_lane1_sqdmull4s(i32 %A, <4 x i16> %B, <4 x i16> %C) nounwind { -; CHECK-LABEL: sqsub_lane1_sqdmull4s: -; CHECK: // %bb.0: -; CHECK-NEXT: sqdmull.4s v0, v0, v1 -; CHECK-NEXT: mov.s w8, v0[1] -; CHECK-NEXT: fmov s0, w0 -; CHECK-NEXT: fmov s1, w8 -; CHECK-NEXT: sqsub s0, s0, s1 -; CHECK-NEXT: fmov w0, s0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqsub_lane1_sqdmull4s: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sqdmull v0.4s, v0.4h, v1.4h +; CHECK-SD-NEXT: mov w8, v0.s[1] +; CHECK-SD-NEXT: fmov s0, w0 +; CHECK-SD-NEXT: fmov s1, w8 +; CHECK-SD-NEXT: sqsub s0, s0, s1 +; CHECK-SD-NEXT: fmov w0, s0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqsub_lane1_sqdmull4s: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sqdmull v0.4s, v0.4h, v1.4h +; CHECK-GI-NEXT: fmov s1, w0 +; CHECK-GI-NEXT: mov s0, v0.s[1] +; CHECK-GI-NEXT: sqsub s0, s1, s0 +; CHECK-GI-NEXT: fmov w0, s0 +; CHECK-GI-NEXT: ret %prod.vec = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %B, <4 x i16> %C) %prod = extractelement <4 x i32> %prod.vec, i32 1 %res = call i32 @llvm.aarch64.neon.sqsub.i32(i32 %A, i32 %prod) @@ -1567,7 +1777,7 @@ define i64 @sqdmlal_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind { ; CHECK-NEXT: fmov d1, x0 ; CHECK-NEXT: fmov s2, w1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: sqdmlal.s d1, s2, v0[1] +; CHECK-NEXT: sqdmlal d1, s2, v0.s[1] ; CHECK-NEXT: fmov x0, d1 ; CHECK-NEXT: ret %rhs = extractelement <2 x i32> %C, i32 1 @@ -1584,7 +1794,7 @@ define i64 @sqdmlsl_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind { ; CHECK-NEXT: fmov d1, x0 ; CHECK-NEXT: fmov s2, w1 ; CHECK-NEXT: // kill: def $d0 killed $d0 
def $q0 -; CHECK-NEXT: sqdmlsl.s d1, s2, v0[1] +; CHECK-NEXT: sqdmlsl d1, s2, v0.s[1] ; CHECK-NEXT: fmov x0, d1 ; CHECK-NEXT: ret %rhs = extractelement <2 x i32> %C, i32 1 @@ -1599,8 +1809,8 @@ define <4 x i32> @umlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwi ; CHECK-LABEL: umlal_lane_4s: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: umlal.4s v2, v0, v1[1] -; CHECK-NEXT: mov.16b v0, v2 +; CHECK-NEXT: umlal v2.4s, v0.4h, v1.h[1] +; CHECK-NEXT: mov v0.16b, v2.16b ; CHECK-NEXT: ret %tmp4 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp5 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %A, <4 x i16> %tmp4) @@ -1612,8 +1822,8 @@ define <2 x i64> @umlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwi ; CHECK-LABEL: umlal_lane_2d: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: umlal.2d v2, v0, v1[1] -; CHECK-NEXT: mov.16b v0, v2 +; CHECK-NEXT: umlal v2.2d, v0.2s, v1.s[1] +; CHECK-NEXT: mov v0.16b, v2.16b ; CHECK-NEXT: ret %tmp4 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1> %tmp5 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %A, <2 x i32> %tmp4) @@ -1626,8 +1836,8 @@ define <4 x i32> @smlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwi ; CHECK-LABEL: smlsl_lane_4s: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: smlsl.4s v2, v0, v1[1] -; CHECK-NEXT: mov.16b v0, v2 +; CHECK-NEXT: smlsl v2.4s, v0.4h, v1.h[1] +; CHECK-NEXT: mov v0.16b, v2.16b ; CHECK-NEXT: ret %tmp4 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp5 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %A, <4 x i16> %tmp4) @@ -1639,8 +1849,8 @@ define <2 x i64> @smlsl_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwi ; CHECK-LABEL: smlsl_lane_2d: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: smlsl.2d v2, v0, v1[1] -; CHECK-NEXT: mov.16b v0, v2 +; CHECK-NEXT: smlsl v2.2d, v0.2s, v1.s[1] +; CHECK-NEXT: mov v0.16b, v2.16b ; CHECK-NEXT: ret %tmp4 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1> %tmp5 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %A, <2 x i32> %tmp4) @@ -1652,8 +1862,8 @@ define <4 x i32> @sqdmlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) noun ; CHECK-LABEL: sqdmlsl_lane_4s: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: sqdmlsl.4s v2, v0, v1[1] -; CHECK-NEXT: mov.16b v0, v2 +; CHECK-NEXT: sqdmlsl v2.4s, v0.4h, v1.h[1] +; CHECK-NEXT: mov v0.16b, v2.16b ; CHECK-NEXT: ret %tmp4 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %A, <4 x i16> %tmp4) @@ -1665,8 +1875,8 @@ define <2 x i64> @sqdmlsl_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) noun ; CHECK-LABEL: sqdmlsl_lane_2d: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: sqdmlsl.2d v2, v0, v1[1] -; CHECK-NEXT: mov.16b v0, v2 +; CHECK-NEXT: sqdmlsl v2.2d, v0.2s, v1.s[1] +; CHECK-NEXT: mov v0.16b, v2.16b ; CHECK-NEXT: ret %tmp4 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1> %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %A, <2 x i32> %tmp4) @@ -1675,11 +1885,18 @@ define <2 x i64> @sqdmlsl_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) noun } define <4 x i32> @sqdmlsl2_lane_4s(<8 x 
i16> %A, <8 x i16> %B, <4 x i32> %C) nounwind { -; CHECK-LABEL: sqdmlsl2_lane_4s: -; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlsl2.4s v2, v0, v1[1] -; CHECK-NEXT: mov.16b v0, v2 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmlsl2_lane_4s: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sqdmlsl2 v2.4s, v0.8h, v1.h[1] +; CHECK-SD-NEXT: mov v0.16b, v2.16b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmlsl2_lane_4s: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d3, v0.d[1] +; CHECK-GI-NEXT: mov v0.16b, v2.16b +; CHECK-GI-NEXT: sqdmlsl v0.4s, v3.4h, v1.h[1] +; CHECK-GI-NEXT: ret %tmp1 = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> %tmp2 = shufflevector <8 x i16> %B, <8 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2) @@ -1688,11 +1905,18 @@ define <4 x i32> @sqdmlsl2_lane_4s(<8 x i16> %A, <8 x i16> %B, <4 x i32> %C) nou } define <2 x i64> @sqdmlsl2_lane_2d(<4 x i32> %A, <4 x i32> %B, <2 x i64> %C) nounwind { -; CHECK-LABEL: sqdmlsl2_lane_2d: -; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlsl2.2d v2, v0, v1[1] -; CHECK-NEXT: mov.16b v0, v2 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmlsl2_lane_2d: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sqdmlsl2 v2.2d, v0.4s, v1.s[1] +; CHECK-SD-NEXT: mov v0.16b, v2.16b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmlsl2_lane_2d: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d3, v0.d[1] +; CHECK-GI-NEXT: mov v0.16b, v2.16b +; CHECK-GI-NEXT: sqdmlsl v0.2d, v3.2s, v1.s[1] +; CHECK-GI-NEXT: ret %tmp1 = shufflevector <4 x i32> %A, <4 x i32> undef, <2 x i32> <i32 2, i32 3> %tmp2 = shufflevector <4 x i32> %B, <4 x i32> undef, <2 x i32> <i32 1, i32 1> %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2) @@ -1704,8 +1928,8 @@ define <4 x i32> @umlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwi ; CHECK-LABEL: umlsl_lane_4s: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: umlsl.4s v2, v0, v1[1] -; CHECK-NEXT: mov.16b v0, v2 +; CHECK-NEXT: umlsl v2.4s, v0.4h, v1.h[1] +; CHECK-NEXT: mov v0.16b, v2.16b ; CHECK-NEXT: ret %tmp4 = shufflevector <4 x i16> %B, <4 x i16> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp5 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %A, <4 x i16> %tmp4) @@ -1717,8 +1941,8 @@ define <2 x i64> @umlsl_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwi ; CHECK-LABEL: umlsl_lane_2d: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: umlsl.2d v2, v0, v1[1] -; CHECK-NEXT: mov.16b v0, v2 +; CHECK-NEXT: umlsl v2.2d, v0.2s, v1.s[1] +; CHECK-NEXT: mov v0.16b, v2.16b ; CHECK-NEXT: ret %tmp4 = shufflevector <2 x i32> %B, <2 x i32> poison, <2 x i32> <i32 1, i32 1> %tmp5 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %A, <2 x i32> %tmp4) @@ -1748,7 +1972,7 @@ define double @fmulxd(double %a, double %b) nounwind { define float @fmulxs_lane(float %a, <4 x float> %vec) nounwind { ; CHECK-LABEL: fmulxs_lane: ; CHECK: // %bb.0: -; CHECK-NEXT: fmulx.s s0, s0, v1[3] +; CHECK-NEXT: fmulx s0, s0, v1.s[3] ; CHECK-NEXT: ret %b = extractelement <4 x float> %vec, i32 3 %fmulx.i = tail call float @llvm.aarch64.neon.fmulx.f32(float %a, float %b) nounwind @@ -1758,7 +1982,7 @@ define float @fmulxs_lane(float %a, <4 x float> %vec) nounwind { define double @fmulxd_lane(double %a, <2 x double> %vec) nounwind { ; CHECK-LABEL: fmulxd_lane: ; CHECK: // %bb.0: -; CHECK-NEXT: fmulx.d d0, d0, v1[1] +; CHECK-NEXT: fmulx d0, d0, v1.d[1] ; 
CHECK-NEXT: ret %b = extractelement <2 x double> %vec, i32 1 %fmulx.i = tail call double @llvm.aarch64.neon.fmulx.f64(double %a, double %b) nounwind @@ -1772,7 +1996,7 @@ declare float @llvm.aarch64.neon.fmulx.f32(float, float) nounwind readnone define <8 x i16> @smull2_8h_simple(<16 x i8> %a, <16 x i8> %b) nounwind { ; CHECK-LABEL: smull2_8h_simple: ; CHECK: // %bb.0: -; CHECK-NEXT: smull2.8h v0, v0, v1 +; CHECK-NEXT: smull2 v0.8h, v0.16b, v1.16b ; CHECK-NEXT: ret %1 = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> %2 = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> @@ -1783,7 +2007,7 @@ define <8 x i16> @smull2_8h_simple(<16 x i8> %a, <16 x i8> %b) nounwind { define <8 x i16> @foo0(<16 x i8> %a, <16 x i8> %b) nounwind { ; CHECK-LABEL: foo0: ; CHECK: // %bb.0: -; CHECK-NEXT: smull2.8h v0, v0, v1 +; CHECK-NEXT: smull2 v0.8h, v0.16b, v1.16b ; CHECK-NEXT: ret %tmp = bitcast <16 x i8> %a to <2 x i64> %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1> @@ -1798,7 +2022,7 @@ define <8 x i16> @foo0(<16 x i8> %a, <16 x i8> %b) nounwind { define <4 x i32> @foo1(<8 x i16> %a, <8 x i16> %b) nounwind { ; CHECK-LABEL: foo1: ; CHECK: // %bb.0: -; CHECK-NEXT: smull2.4s v0, v0, v1 +; CHECK-NEXT: smull2 v0.4s, v0.8h, v1.8h ; CHECK-NEXT: ret %tmp = bitcast <8 x i16> %a to <2 x i64> %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1> @@ -1813,7 +2037,7 @@ define <4 x i32> @foo1(<8 x i16> %a, <8 x i16> %b) nounwind { define <2 x i64> @foo2(<4 x i32> %a, <4 x i32> %b) nounwind { ; CHECK-LABEL: foo2: ; CHECK: // %bb.0: -; CHECK-NEXT: smull2.2d v0, v0, v1 +; CHECK-NEXT: smull2 v0.2d, v0.4s, v1.4s ; CHECK-NEXT: ret %tmp = bitcast <4 x i32> %a to <2 x i64> %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1> @@ -1828,7 +2052,7 @@ define <2 x i64> @foo2(<4 x i32> %a, <4 x i32> %b) nounwind { define <8 x i16> @foo3(<16 x i8> %a, <16 x i8> %b) nounwind { ; CHECK-LABEL: foo3: ; CHECK: // %bb.0: -; CHECK-NEXT: umull2.8h v0, v0, v1 +; CHECK-NEXT: umull2 v0.8h, v0.16b, v1.16b ; CHECK-NEXT: ret %tmp = bitcast <16 x i8> %a to <2 x i64> %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1> @@ -1843,7 +2067,7 @@ define <8 x i16> @foo3(<16 x i8> %a, <16 x i8> %b) nounwind { define <4 x i32> @foo4(<8 x i16> %a, <8 x i16> %b) nounwind { ; CHECK-LABEL: foo4: ; CHECK: // %bb.0: -; CHECK-NEXT: umull2.4s v0, v0, v1 +; CHECK-NEXT: umull2 v0.4s, v0.8h, v1.8h ; CHECK-NEXT: ret %tmp = bitcast <8 x i16> %a to <2 x i64> %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1> @@ -1858,7 +2082,7 @@ define <4 x i32> @foo4(<8 x i16> %a, <8 x i16> %b) nounwind { define <2 x i64> @foo5(<4 x i32> %a, <4 x i32> %b) nounwind { ; CHECK-LABEL: foo5: ; CHECK: // %bb.0: -; CHECK-NEXT: umull2.2d v0, v0, v1 +; CHECK-NEXT: umull2 v0.2d, v0.4s, v1.4s ; CHECK-NEXT: ret %tmp = bitcast <4 x i32> %a to <2 x i64> %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1> @@ -1871,11 +2095,18 @@ define <2 x i64> @foo5(<4 x i32> %a, <4 x i32> %b) nounwind { } define <4 x i32> @foo6(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind readnone optsize ssp { -; CHECK-LABEL: foo6: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: smull2.4s v0, v1, v2[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: foo6: +; CHECK-SD: // %bb.0: // %entry 
+; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-SD-NEXT: smull2 v0.4s, v1.8h, v2.h[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: foo6: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: smull v0.4s, v0.4h, v2.h[1] +; CHECK-GI-NEXT: ret entry: %0 = bitcast <8 x i16> %b to <2 x i64> %shuffle.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1> @@ -1889,7 +2120,7 @@ define <4 x i32> @foo6a(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind readn ; CHECK-LABEL: foo6a: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: smull.4s v0, v1, v2[1] +; CHECK-NEXT: smull v0.4s, v1.4h, v2.h[1] ; CHECK-NEXT: ret entry: %0 = bitcast <8 x i16> %b to <2 x i64> @@ -1901,11 +2132,18 @@ entry: } define <2 x i64> @foo7(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind readnone optsize ssp { -; CHECK-LABEL: foo7: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: smull2.2d v0, v1, v2[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: foo7: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-SD-NEXT: smull2 v0.2d, v1.4s, v2.s[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: foo7: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: smull v0.2d, v0.2s, v2.s[1] +; CHECK-GI-NEXT: ret entry: %0 = bitcast <4 x i32> %b to <2 x i64> %shuffle.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1> @@ -1919,7 +2157,7 @@ define <2 x i64> @foo7a(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind readn ; CHECK-LABEL: foo7a: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: smull.2d v0, v1, v2[1] +; CHECK-NEXT: smull v0.2d, v1.2s, v2.s[1] ; CHECK-NEXT: ret entry: %0 = bitcast <4 x i32> %b to <2 x i64> @@ -1932,11 +2170,18 @@ entry: define <4 x i32> @foo8(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind readnone optsize ssp { -; CHECK-LABEL: foo8: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: umull2.4s v0, v1, v2[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: foo8: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-SD-NEXT: umull2 v0.4s, v1.8h, v2.h[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: foo8: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: umull v0.4s, v0.4h, v2.h[1] +; CHECK-GI-NEXT: ret entry: %0 = bitcast <8 x i16> %b to <2 x i64> %shuffle.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1> @@ -1950,7 +2195,7 @@ define <4 x i32> @foo8a(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind readn ; CHECK-LABEL: foo8a: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: umull.4s v0, v1, v2[1] +; CHECK-NEXT: umull v0.4s, v1.4h, v2.h[1] ; CHECK-NEXT: ret entry: %0 = bitcast <8 x i16> %b to <2 x i64> @@ -1962,11 +2207,18 @@ entry: } define <2 x i64> @foo9(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind readnone optsize ssp { -; CHECK-LABEL: foo9: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: umull2.2d v0, v1, v2[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: foo9: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-SD-NEXT: 
umull2 v0.2d, v1.4s, v2.s[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: foo9: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: umull v0.2d, v0.2s, v2.s[1] +; CHECK-GI-NEXT: ret entry: %0 = bitcast <4 x i32> %b to <2 x i64> %shuffle.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1> @@ -1980,7 +2232,7 @@ define <2 x i64> @foo9a(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind readn ; CHECK-LABEL: foo9a: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: umull.2d v0, v1, v2[1] +; CHECK-NEXT: umull v0.2d, v1.2s, v2.s[1] ; CHECK-NEXT: ret entry: %0 = bitcast <4 x i32> %b to <2 x i64> @@ -1994,7 +2246,7 @@ entry: define <8 x i16> @bar0(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind { ; CHECK-LABEL: bar0: ; CHECK: // %bb.0: -; CHECK-NEXT: smlal2.8h v0, v1, v2 +; CHECK-NEXT: smlal2 v0.8h, v1.16b, v2.16b ; CHECK-NEXT: ret %tmp = bitcast <16 x i8> %b to <2 x i64> %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1> @@ -2010,7 +2262,7 @@ define <8 x i16> @bar0(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind { define <4 x i32> @bar1(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind { ; CHECK-LABEL: bar1: ; CHECK: // %bb.0: -; CHECK-NEXT: smlal2.4s v0, v1, v2 +; CHECK-NEXT: smlal2 v0.4s, v1.8h, v2.8h ; CHECK-NEXT: ret %tmp = bitcast <8 x i16> %b to <2 x i64> %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1> @@ -2026,7 +2278,7 @@ define <4 x i32> @bar1(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind { define <2 x i64> @bar2(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind { ; CHECK-LABEL: bar2: ; CHECK: // %bb.0: -; CHECK-NEXT: smlal2.2d v0, v1, v2 +; CHECK-NEXT: smlal2 v0.2d, v1.4s, v2.4s ; CHECK-NEXT: ret %tmp = bitcast <4 x i32> %b to <2 x i64> %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1> @@ -2042,7 +2294,7 @@ define <2 x i64> @bar2(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind { define <8 x i16> @bar3(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind { ; CHECK-LABEL: bar3: ; CHECK: // %bb.0: -; CHECK-NEXT: umlal2.8h v0, v1, v2 +; CHECK-NEXT: umlal2 v0.8h, v1.16b, v2.16b ; CHECK-NEXT: ret %tmp = bitcast <16 x i8> %b to <2 x i64> %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1> @@ -2058,7 +2310,7 @@ define <8 x i16> @bar3(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind { define <4 x i32> @bar4(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind { ; CHECK-LABEL: bar4: ; CHECK: // %bb.0: -; CHECK-NEXT: umlal2.4s v0, v1, v2 +; CHECK-NEXT: umlal2 v0.4s, v1.8h, v2.8h ; CHECK-NEXT: ret %tmp = bitcast <8 x i16> %b to <2 x i64> %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1> @@ -2074,7 +2326,7 @@ define <4 x i32> @bar4(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind { define <2 x i64> @bar5(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind { ; CHECK-LABEL: bar5: ; CHECK: // %bb.0: -; CHECK-NEXT: umlal2.2d v0, v1, v2 +; CHECK-NEXT: umlal2 v0.2d, v1.4s, v2.4s ; CHECK-NEXT: ret %tmp = bitcast <4 x i32> %b to <2 x i64> %shuffle.i.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1> @@ -2088,11 +2340,18 @@ define <2 x i64> @bar5(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind { } define <4 x i32> @mlal2_1(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind { -; CHECK-LABEL: mlal2_1: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; 
CHECK-NEXT: smlal2.4s v0, v1, v2[3] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: mlal2_1: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-SD-NEXT: smlal2 v0.4s, v1.8h, v2.h[3] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mlal2_1: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: dup v2.8h, v2.h[3] +; CHECK-GI-NEXT: smlal2 v0.4s, v1.8h, v2.8h +; CHECK-GI-NEXT: ret %shuffle = shufflevector <4 x i16> %c, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3> %tmp = bitcast <8 x i16> %b to <2 x i64> %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1> @@ -2106,11 +2365,18 @@ define <4 x i32> @mlal2_1(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind { } define <2 x i64> @mlal2_2(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind { -; CHECK-LABEL: mlal2_2: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: smlal2.2d v0, v1, v2[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: mlal2_2: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-SD-NEXT: smlal2 v0.2d, v1.4s, v2.s[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mlal2_2: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: dup v2.4s, v2.s[1] +; CHECK-GI-NEXT: smlal2 v0.2d, v1.4s, v2.4s +; CHECK-GI-NEXT: ret %shuffle = shufflevector <2 x i32> %c, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> %tmp = bitcast <4 x i32> %b to <2 x i64> %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1> @@ -2124,11 +2390,18 @@ define <2 x i64> @mlal2_2(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind { } define <4 x i32> @mlal2_4(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind { -; CHECK-LABEL: mlal2_4: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: umlal2.4s v0, v1, v2[2] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: mlal2_4: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-SD-NEXT: umlal2 v0.4s, v1.8h, v2.h[2] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mlal2_4: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: dup v2.8h, v2.h[2] +; CHECK-GI-NEXT: umlal2 v0.4s, v1.8h, v2.8h +; CHECK-GI-NEXT: ret %shuffle = shufflevector <4 x i16> %c, <4 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2> %tmp = bitcast <8 x i16> %b to <2 x i64> %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1> @@ -2142,11 +2415,18 @@ define <4 x i32> @mlal2_4(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind { } define <2 x i64> @mlal2_5(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind { -; CHECK-LABEL: mlal2_5: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: umlal2.2d v0, v1, v2[0] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: mlal2_5: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-SD-NEXT: umlal2 v0.2d, v1.4s, v2.s[0] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mlal2_5: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: dup v2.4s, v2.s[0] +; CHECK-GI-NEXT: umlal2 v0.2d, v1.4s, v2.4s +; CHECK-GI-NEXT: ret %shuffle = shufflevector <2 x i32> %c, <2 x i32> undef, <4 x i32> zeroinitializer %tmp = bitcast <4 x i32> %b to <2 x i64> %shuffle.i.i = shufflevector <2 x i64> %tmp, <2 x i64> undef, <1 x i32> <i32 1> @@ -2164,7 +2444,7 @@ define <2 x 
double> @vmulq_n_f64(<2 x double> %x, double %y) nounwind readnone s ; CHECK-LABEL: vmulq_n_f64: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: fmul.2d v0, v0, v1[0] +; CHECK-NEXT: fmul v0.2d, v0.2d, v1.d[0] ; CHECK-NEXT: ret entry: %vecinit.i = insertelement <2 x double> undef, double %y, i32 0 @@ -2177,7 +2457,7 @@ define <4 x float> @vmulq_n_f32(<4 x float> %x, float %y) nounwind readnone ssp ; CHECK-LABEL: vmulq_n_f32: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: // kill: def $s1 killed $s1 def $q1 -; CHECK-NEXT: fmul.4s v0, v0, v1[0] +; CHECK-NEXT: fmul v0.4s, v0.4s, v1.s[0] ; CHECK-NEXT: ret entry: %vecinit.i = insertelement <4 x float> undef, float %y, i32 0 @@ -2192,7 +2472,7 @@ define <2 x float> @vmul_n_f32(<2 x float> %x, float %y) nounwind readnone ssp { ; CHECK-LABEL: vmul_n_f32: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: // kill: def $s1 killed $s1 def $q1 -; CHECK-NEXT: fmul.2s v0, v0, v1[0] +; CHECK-NEXT: fmul v0.2s, v0.2s, v1.s[0] ; CHECK-NEXT: ret entry: %vecinit.i = insertelement <2 x float> undef, float %y, i32 0 @@ -2204,7 +2484,7 @@ entry: define <4 x i16> @vmla_laneq_s16_test(<4 x i16> %a, <4 x i16> %b, <8 x i16> %c) nounwind readnone ssp { ; CHECK-LABEL: vmla_laneq_s16_test: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: mla.4h v0, v1, v2[6] +; CHECK-NEXT: mla v0.4h, v1.4h, v2.h[6] ; CHECK-NEXT: ret entry: %shuffle = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 6, i32 6, i32 6, i32 6> @@ -2216,7 +2496,7 @@ entry: define <2 x i32> @vmla_laneq_s32_test(<2 x i32> %a, <2 x i32> %b, <4 x i32> %c) nounwind readnone ssp { ; CHECK-LABEL: vmla_laneq_s32_test: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: mla.2s v0, v1, v2[3] +; CHECK-NEXT: mla v0.2s, v1.2s, v2.s[3] ; CHECK-NEXT: ret entry: %shuffle = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 3, i32 3> @@ -2226,10 +2506,16 @@ entry: } define <8 x i16> @not_really_vmlaq_laneq_s16_test(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind readnone ssp { -; CHECK-LABEL: not_really_vmlaq_laneq_s16_test: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: mla.8h v0, v1, v2[5] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: not_really_vmlaq_laneq_s16_test: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: mla v0.8h, v1.8h, v2.h[5] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: not_really_vmlaq_laneq_s16_test: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: ext v2.16b, v2.16b, v0.16b, #8 +; CHECK-GI-NEXT: mla v0.8h, v1.8h, v2.h[1] +; CHECK-GI-NEXT: ret entry: %shuffle1 = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> %shuffle2 = shufflevector <4 x i16> %shuffle1, <4 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> @@ -2239,10 +2525,16 @@ entry: } define <4 x i32> @not_really_vmlaq_laneq_s32_test(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind readnone ssp { -; CHECK-LABEL: not_really_vmlaq_laneq_s32_test: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: mla.4s v0, v1, v2[3] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: not_really_vmlaq_laneq_s32_test: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: mla v0.4s, v1.4s, v2.s[3] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: not_really_vmlaq_laneq_s32_test: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: ext v2.16b, v2.16b, v0.16b, #8 +; CHECK-GI-NEXT: mla v0.4s, v1.4s, v2.s[1] +; CHECK-GI-NEXT: ret entry: %shuffle1 = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3> %shuffle2 = shufflevector <2 x i32> %shuffle1, <2 x i32> undef, <4 x i32> <i32 1, i32 
1, i32 1, i32 1> @@ -2254,7 +2546,7 @@ entry: define <4 x i32> @vmull_laneq_s16_test(<4 x i16> %a, <8 x i16> %b) nounwind readnone ssp { ; CHECK-LABEL: vmull_laneq_s16_test: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: smull.4s v0, v0, v1[6] +; CHECK-NEXT: smull v0.4s, v0.4h, v1.h[6] ; CHECK-NEXT: ret entry: %shuffle = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 6, i32 6, i32 6, i32 6> @@ -2265,7 +2557,7 @@ entry: define <2 x i64> @vmull_laneq_s32_test(<2 x i32> %a, <4 x i32> %b) nounwind readnone ssp { ; CHECK-LABEL: vmull_laneq_s32_test: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: smull.2d v0, v0, v1[2] +; CHECK-NEXT: smull v0.2d, v0.2s, v1.s[2] ; CHECK-NEXT: ret entry: %shuffle = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 2> @@ -2275,7 +2567,7 @@ entry: define <4 x i32> @vmull_laneq_u16_test(<4 x i16> %a, <8 x i16> %b) nounwind readnone ssp { ; CHECK-LABEL: vmull_laneq_u16_test: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: umull.4s v0, v0, v1[6] +; CHECK-NEXT: umull v0.4s, v0.4h, v1.h[6] ; CHECK-NEXT: ret entry: %shuffle = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 6, i32 6, i32 6, i32 6> @@ -2286,7 +2578,7 @@ entry: define <2 x i64> @vmull_laneq_u32_test(<2 x i32> %a, <4 x i32> %b) nounwind readnone ssp { ; CHECK-LABEL: vmull_laneq_u32_test: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: umull.2d v0, v0, v1[2] +; CHECK-NEXT: umull v0.2d, v0.2s, v1.s[2] ; CHECK-NEXT: ret entry: %shuffle = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 2> @@ -2297,8 +2589,8 @@ entry: define <4 x i32> @vmull_low_n_s16_test(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c, i32 %d) nounwind readnone optsize ssp { ; CHECK-LABEL: vmull_low_n_s16_test: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: dup.4h v0, w0 -; CHECK-NEXT: smull.4s v0, v1, v0 +; CHECK-NEXT: dup v0.4h, w0 +; CHECK-NEXT: smull v0.4s, v1.4h, v0.4h ; CHECK-NEXT: ret entry: %conv = trunc i32 %d to i16 @@ -2314,11 +2606,18 @@ entry: } define <4 x i32> @vmull_high_n_s16_test(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c, i32 %d) nounwind readnone optsize ssp { -; CHECK-LABEL: vmull_high_n_s16_test: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: dup.8h v0, w0 -; CHECK-NEXT: smull2.4s v0, v1, v0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: vmull_high_n_s16_test: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: dup v0.8h, w0 +; CHECK-SD-NEXT: smull2 v0.4s, v1.8h, v0.8h +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: vmull_high_n_s16_test: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: dup v1.4h, w0 +; CHECK-GI-NEXT: smull v0.4s, v0.4h, v1.4h +; CHECK-GI-NEXT: ret entry: %conv = trunc i32 %d to i16 %0 = bitcast <8 x i16> %b to <2 x i64> @@ -2333,11 +2632,18 @@ entry: } define <2 x i64> @vmull_high_n_s32_test(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c, i32 %d) nounwind readnone optsize ssp { -; CHECK-LABEL: vmull_high_n_s32_test: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: dup.4s v0, w0 -; CHECK-NEXT: smull2.2d v0, v1, v0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: vmull_high_n_s32_test: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: dup v0.4s, w0 +; CHECK-SD-NEXT: smull2 v0.2d, v1.4s, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: vmull_high_n_s32_test: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: dup v1.2s, w0 +; CHECK-GI-NEXT: smull v0.2d, v0.2s, v1.2s +; CHECK-GI-NEXT: ret entry: %0 = bitcast <4 x i32> %b to <2 x i64> %shuffle.i.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1> @@ -2349,11 +2655,18 @@ entry: } 
define <4 x i32> @vmull_high_n_u16_test(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c, i32 %d) nounwind readnone optsize ssp { -; CHECK-LABEL: vmull_high_n_u16_test: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: dup.8h v0, w0 -; CHECK-NEXT: umull2.4s v0, v1, v0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: vmull_high_n_u16_test: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: dup v0.8h, w0 +; CHECK-SD-NEXT: umull2 v0.4s, v1.8h, v0.8h +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: vmull_high_n_u16_test: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: dup v1.4h, w0 +; CHECK-GI-NEXT: umull v0.4s, v0.4h, v1.4h +; CHECK-GI-NEXT: ret entry: %conv = trunc i32 %d to i16 %0 = bitcast <8 x i16> %b to <2 x i64> @@ -2368,11 +2681,18 @@ entry: } define <2 x i64> @vmull_high_n_u32_test(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c, i32 %d) nounwind readnone optsize ssp { -; CHECK-LABEL: vmull_high_n_u32_test: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: dup.4s v0, w0 -; CHECK-NEXT: umull2.2d v0, v1, v0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: vmull_high_n_u32_test: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: dup v0.4s, w0 +; CHECK-SD-NEXT: umull2 v0.2d, v1.4s, v0.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: vmull_high_n_u32_test: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: mov d0, v1.d[1] +; CHECK-GI-NEXT: dup v1.2s, w0 +; CHECK-GI-NEXT: umull v0.2d, v0.2s, v1.2s +; CHECK-GI-NEXT: ret entry: %0 = bitcast <4 x i32> %b to <2 x i64> %shuffle.i.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> <i32 1> @@ -2384,10 +2704,17 @@ entry: } define <4 x i32> @vmul_built_dup_test(<4 x i32> %a, <4 x i32> %b) { -; CHECK-LABEL: vmul_built_dup_test: -; CHECK: // %bb.0: -; CHECK-NEXT: mul.4s v0, v0, v1[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: vmul_built_dup_test: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mul v0.4s, v0.4s, v1.s[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: vmul_built_dup_test: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov s1, v1.s[1] +; CHECK-GI-NEXT: dup v1.4s, v1.s[0] +; CHECK-GI-NEXT: mul v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: ret %vget_lane = extractelement <4 x i32> %b, i32 1 %vecinit.i = insertelement <4 x i32> undef, i32 %vget_lane, i32 0 %vecinit1.i = insertelement <4 x i32> %vecinit.i, i32 %vget_lane, i32 1 @@ -2398,11 +2725,19 @@ define <4 x i32> @vmul_built_dup_test(<4 x i32> %a, <4 x i32> %b) { } define <4 x i16> @vmul_built_dup_fromsmall_test(<4 x i16> %a, <4 x i16> %b) { -; CHECK-LABEL: vmul_built_dup_fromsmall_test: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: mul.4h v0, v0, v1[3] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: vmul_built_dup_fromsmall_test: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1 +; CHECK-SD-NEXT: mul v0.4h, v0.4h, v1.h[3] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: vmul_built_dup_fromsmall_test: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1 +; CHECK-GI-NEXT: mov h1, v1.h[3] +; CHECK-GI-NEXT: dup v1.4h, v1.h[0] +; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h +; CHECK-GI-NEXT: ret %vget_lane = extractelement <4 x i16> %b, i32 3 %vecinit.i = insertelement <4 x i16> undef, i16 %vget_lane, i32 0 %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %vget_lane, i32 1 @@ -2413,11 +2748,18 @@ define <4 x i16> @vmul_built_dup_fromsmall_test(<4 x i16> %a, <4 x i16> %b) { } define <8 x i16> @vmulq_built_dup_fromsmall_test(<8 x i16> %a, <4 x i16> %b) { -; CHECK-LABEL: vmulq_built_dup_fromsmall_test: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $d1 
killed $d1 def $q1 -; CHECK-NEXT: mul.8h v0, v0, v1[0] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: vmulq_built_dup_fromsmall_test: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1 +; CHECK-SD-NEXT: mul v0.8h, v0.8h, v1.h[0] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: vmulq_built_dup_fromsmall_test: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1 +; CHECK-GI-NEXT: dup v1.8h, v1.h[0] +; CHECK-GI-NEXT: mul v0.8h, v0.8h, v1.8h +; CHECK-GI-NEXT: ret %vget_lane = extractelement <4 x i16> %b, i32 0 %vecinit.i = insertelement <8 x i16> undef, i16 %vget_lane, i32 0 %vecinit1.i = insertelement <8 x i16> %vecinit.i, i16 %vget_lane, i32 1 @@ -2434,7 +2776,7 @@ define <8 x i16> @vmulq_built_dup_fromsmall_test(<8 x i16> %a, <4 x i16> %b) { define <2 x i64> @mull_from_two_extracts(<4 x i32> %lhs, <4 x i32> %rhs) { ; CHECK-LABEL: mull_from_two_extracts: ; CHECK: // %bb.0: -; CHECK-NEXT: sqdmull2.2d v0, v0, v1 +; CHECK-NEXT: sqdmull2 v0.2d, v0.4s, v1.4s ; CHECK-NEXT: ret %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3> %rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3> @@ -2446,7 +2788,7 @@ define <2 x i64> @mull_from_two_extracts(<4 x i32> %lhs, <4 x i32> %rhs) { define <2 x i64> @mlal_from_two_extracts(<2 x i64> %accum, <4 x i32> %lhs, <4 x i32> %rhs) { ; CHECK-LABEL: mlal_from_two_extracts: ; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlal2.2d v0, v1, v2 +; CHECK-NEXT: sqdmlal2 v0.2d, v1.4s, v2.4s ; CHECK-NEXT: ret %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3> %rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3> @@ -2459,8 +2801,8 @@ define <2 x i64> @mlal_from_two_extracts(<2 x i64> %accum, <4 x i32> %lhs, <4 x define <2 x i64> @mull_from_extract_dup_low(<4 x i32> %lhs, i32 %rhs) { ; CHECK-LABEL: mull_from_extract_dup_low: ; CHECK: // %bb.0: -; CHECK-NEXT: dup.2s v1, w0 -; CHECK-NEXT: sqdmull.2d v0, v0, v1 +; CHECK-NEXT: dup v1.2s, w0 +; CHECK-NEXT: sqdmull v0.2d, v0.2s, v1.2s ; CHECK-NEXT: ret %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0 %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1 @@ -2472,11 +2814,18 @@ define <2 x i64> @mull_from_extract_dup_low(<4 x i32> %lhs, i32 %rhs) { } define <2 x i64> @mull_from_extract_dup_high(<4 x i32> %lhs, i32 %rhs) { -; CHECK-LABEL: mull_from_extract_dup_high: -; CHECK: // %bb.0: -; CHECK-NEXT: dup.4s v1, w0 -; CHECK-NEXT: sqdmull2.2d v0, v0, v1 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: mull_from_extract_dup_high: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: dup v1.4s, w0 +; CHECK-SD-NEXT: sqdmull2 v0.2d, v0.4s, v1.4s +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mull_from_extract_dup_high: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: dup v1.2s, w0 +; CHECK-GI-NEXT: mov d0, v0.d[1] +; CHECK-GI-NEXT: sqdmull v0.2d, v0.2s, v1.2s +; CHECK-GI-NEXT: ret %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0 %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1 @@ -2489,8 +2838,8 @@ define <2 x i64> @mull_from_extract_dup_high(<4 x i32> %lhs, i32 %rhs) { define <8 x i16> @pmull_from_extract_dup_low(<16 x i8> %lhs, i8 %rhs) { ; CHECK-LABEL: pmull_from_extract_dup_low: ; CHECK: // %bb.0: -; CHECK-NEXT: dup.8b v1, w0 -; CHECK-NEXT: pmull.8h v0, v0, v1 +; CHECK-NEXT: dup v1.8b, w0 +; CHECK-NEXT: pmull v0.8h, v0.8b, v1.8b ; CHECK-NEXT: ret %rhsvec.0 = insertelement <8 x i8> undef, i8 %rhs, i32 0 %rhsvec = shufflevector <8 x i8> %rhsvec.0, <8 x i8> undef, <8 x i32> <i32 
0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0> @@ -2504,8 +2853,8 @@ define <8 x i16> @pmull_from_extract_dup_low(<16 x i8> %lhs, i8 %rhs) { define <8 x i16> @pmull_from_extract_dup_high(<16 x i8> %lhs, i8 %rhs) { ; CHECK-LABEL: pmull_from_extract_dup_high: ; CHECK: // %bb.0: -; CHECK-NEXT: dup.16b v1, w0 -; CHECK-NEXT: pmull2.8h v0, v0, v1 +; CHECK-NEXT: dup v1.16b, w0 +; CHECK-NEXT: pmull2 v0.8h, v0.16b, v1.16b ; CHECK-NEXT: ret %rhsvec.0 = insertelement <8 x i8> undef, i8 %rhs, i32 0 %rhsvec = shufflevector <8 x i8> %rhsvec.0, <8 x i8> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0> @@ -2520,8 +2869,8 @@ define <8 x i16> @pmull_from_extract_duplane_low(<16 x i8> %lhs, <8 x i8> %rhs) ; CHECK-LABEL: pmull_from_extract_duplane_low: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: dup.8b v1, v1[0] -; CHECK-NEXT: pmull.8h v0, v0, v1 +; CHECK-NEXT: dup v1.8b, v1.b[0] +; CHECK-NEXT: pmull v0.8h, v0.8b, v1.8b ; CHECK-NEXT: ret %lhs.high = shufflevector <16 x i8> %lhs, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> %rhs.high = shufflevector <8 x i8> %rhs, <8 x i8> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0> @@ -2534,8 +2883,8 @@ define <8 x i16> @pmull_from_extract_duplane_high(<16 x i8> %lhs, <8 x i8> %rhs) ; CHECK-LABEL: pmull_from_extract_duplane_high: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: dup.16b v1, v1[0] -; CHECK-NEXT: pmull2.8h v0, v0, v1 +; CHECK-NEXT: dup v1.16b, v1.b[0] +; CHECK-NEXT: pmull2 v0.8h, v0.16b, v1.16b ; CHECK-NEXT: ret %lhs.high = shufflevector <16 x i8> %lhs, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> %rhs.high = shufflevector <8 x i8> %rhs, <8 x i8> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0> @@ -2547,7 +2896,7 @@ define <8 x i16> @pmull_from_extract_duplane_high(<16 x i8> %lhs, <8 x i8> %rhs) define <2 x i64> @sqdmull_from_extract_duplane_low(<4 x i32> %lhs, <4 x i32> %rhs) { ; CHECK-LABEL: sqdmull_from_extract_duplane_low: ; CHECK: // %bb.0: -; CHECK-NEXT: sqdmull.2d v0, v0, v1[0] +; CHECK-NEXT: sqdmull v0.2d, v0.2s, v1.s[0] ; CHECK-NEXT: ret %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 0, i32 1> %rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 0, i32 0> @@ -2557,10 +2906,16 @@ define <2 x i64> @sqdmull_from_extract_duplane_low(<4 x i32> %lhs, <4 x i32> %rh } define <2 x i64> @sqdmull_from_extract_duplane_high(<4 x i32> %lhs, <4 x i32> %rhs) { -; CHECK-LABEL: sqdmull_from_extract_duplane_high: -; CHECK: // %bb.0: -; CHECK-NEXT: sqdmull2.2d v0, v0, v1[0] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmull_from_extract_duplane_high: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sqdmull2 v0.2d, v0.4s, v1.s[0] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmull_from_extract_duplane_high: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d0, v0.d[1] +; CHECK-GI-NEXT: sqdmull v0.2d, v0.2s, v1.s[0] +; CHECK-GI-NEXT: ret %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3> %rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 0, i32 0> @@ -2571,7 +2926,7 @@ define <2 x i64> @sqdmull_from_extract_duplane_high(<4 x i32> %lhs, <4 x i32> %r define <2 x i64> @sqdmlal_from_extract_duplane_low(<2 x i64> %accum, <4 x i32> %lhs, <4 x i32> %rhs) { ; CHECK-LABEL: sqdmlal_from_extract_duplane_low: ; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlal.2d v0, v1, 
v2[0] +; CHECK-NEXT: sqdmlal v0.2d, v1.2s, v2.s[0] ; CHECK-NEXT: ret %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 0, i32 1> %rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 0, i32 0> @@ -2582,10 +2937,16 @@ define <2 x i64> @sqdmlal_from_extract_duplane_low(<2 x i64> %accum, <4 x i32> % } define <2 x i64> @sqdmlal_from_extract_duplane_high(<2 x i64> %accum, <4 x i32> %lhs, <4 x i32> %rhs) { -; CHECK-LABEL: sqdmlal_from_extract_duplane_high: -; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlal2.2d v0, v1, v2[0] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmlal_from_extract_duplane_high: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sqdmlal2 v0.2d, v1.4s, v2.s[0] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmlal_from_extract_duplane_high: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d1, v1.d[1] +; CHECK-GI-NEXT: sqdmlal v0.2d, v1.2s, v2.s[0] +; CHECK-GI-NEXT: ret %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3> %rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 0, i32 0> @@ -2597,7 +2958,7 @@ define <2 x i64> @sqdmlal_from_extract_duplane_high(<2 x i64> %accum, <4 x i32> define <2 x i64> @umlal_from_extract_duplane_low(<2 x i64> %accum, <4 x i32> %lhs, <4 x i32> %rhs) { ; CHECK-LABEL: umlal_from_extract_duplane_low: ; CHECK: // %bb.0: -; CHECK-NEXT: umlal.2d v0, v1, v2[0] +; CHECK-NEXT: umlal v0.2d, v1.2s, v2.s[0] ; CHECK-NEXT: ret %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 0, i32 1> %rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 0, i32 0> @@ -2608,10 +2969,16 @@ define <2 x i64> @umlal_from_extract_duplane_low(<2 x i64> %accum, <4 x i32> %lh } define <2 x i64> @umlal_from_extract_duplane_high(<2 x i64> %accum, <4 x i32> %lhs, <4 x i32> %rhs) { -; CHECK-LABEL: umlal_from_extract_duplane_high: -; CHECK: // %bb.0: -; CHECK-NEXT: umlal2.2d v0, v1, v2[0] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: umlal_from_extract_duplane_high: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: umlal2 v0.2d, v1.4s, v2.s[0] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: umlal_from_extract_duplane_high: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d1, v1.d[1] +; CHECK-GI-NEXT: umlal v0.2d, v1.2s, v2.s[0] +; CHECK-GI-NEXT: ret %lhs.high = shufflevector <4 x i32> %lhs, <4 x i32> undef, <2 x i32> <i32 2, i32 3> %rhs.high = shufflevector <4 x i32> %rhs, <4 x i32> undef, <2 x i32> <i32 0, i32 0> @@ -2623,7 +2990,7 @@ define <2 x i64> @umlal_from_extract_duplane_high(<2 x i64> %accum, <4 x i32> %l define float @scalar_fmla_from_extract_v4f32(float %accum, float %lhs, <4 x float> %rvec) { ; CHECK-LABEL: scalar_fmla_from_extract_v4f32: ; CHECK: // %bb.0: -; CHECK-NEXT: fmla.s s0, s1, v2[3] +; CHECK-NEXT: fmla s0, s1, v2.s[3] ; CHECK-NEXT: ret %rhs = extractelement <4 x float> %rvec, i32 3 %res = call float @llvm.fma.f32(float %lhs, float %rhs, float %accum) @@ -2631,11 +2998,18 @@ define float @scalar_fmla_from_extract_v4f32(float %accum, float %lhs, <4 x floa } define float @scalar_fmla_from_extract_v2f32(float %accum, float %lhs, <2 x float> %rvec) { -; CHECK-LABEL: scalar_fmla_from_extract_v2f32: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: fmla.s s0, s1, v2[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: scalar_fmla_from_extract_v2f32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-SD-NEXT: fmla s0, s1, v2.s[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: scalar_fmla_from_extract_v2f32: +; CHECK-GI: // %bb.0: +; 
CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: mov s2, v2.s[1] +; CHECK-GI-NEXT: fmadd s0, s1, s2, s0 +; CHECK-GI-NEXT: ret %rhs = extractelement <2 x float> %rvec, i32 1 %res = call float @llvm.fma.f32(float %lhs, float %rhs, float %accum) ret float %res @@ -2644,7 +3018,7 @@ define float @scalar_fmla_from_extract_v2f32(float %accum, float %lhs, <2 x floa define float @scalar_fmls_from_extract_v4f32(float %accum, float %lhs, <4 x float> %rvec) { ; CHECK-LABEL: scalar_fmls_from_extract_v4f32: ; CHECK: // %bb.0: -; CHECK-NEXT: fmls.s s0, s1, v2[3] +; CHECK-NEXT: fmls s0, s1, v2.s[3] ; CHECK-NEXT: ret %rhs.scal = extractelement <4 x float> %rvec, i32 3 %rhs = fsub float -0.0, %rhs.scal @@ -2656,7 +3030,7 @@ define float @scalar_fmls_from_extract_v2f32(float %accum, float %lhs, <2 x floa ; CHECK-LABEL: scalar_fmls_from_extract_v2f32: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: fmls.s s0, s1, v2[1] +; CHECK-NEXT: fmls s0, s1, v2.s[1] ; CHECK-NEXT: ret %rhs.scal = extractelement <2 x float> %rvec, i32 1 %rhs = fsub float -0.0, %rhs.scal @@ -2669,7 +3043,7 @@ declare float @llvm.fma.f32(float, float, float) define double @scalar_fmla_from_extract_v2f64(double %accum, double %lhs, <2 x double> %rvec) { ; CHECK-LABEL: scalar_fmla_from_extract_v2f64: ; CHECK: // %bb.0: -; CHECK-NEXT: fmla.d d0, d1, v2[1] +; CHECK-NEXT: fmla d0, d1, v2.d[1] ; CHECK-NEXT: ret %rhs = extractelement <2 x double> %rvec, i32 1 %res = call double @llvm.fma.f64(double %lhs, double %rhs, double %accum) @@ -2679,7 +3053,7 @@ define double @scalar_fmla_from_extract_v2f64(double %accum, double %lhs, <2 x d define double @scalar_fmls_from_extract_v2f64(double %accum, double %lhs, <2 x double> %rvec) { ; CHECK-LABEL: scalar_fmls_from_extract_v2f64: ; CHECK: // %bb.0: -; CHECK-NEXT: fmls.d d0, d1, v2[1] +; CHECK-NEXT: fmls d0, d1, v2.d[1] ; CHECK-NEXT: ret %rhs.scal = extractelement <2 x double> %rvec, i32 1 %rhs = fsub double -0.0, %rhs.scal @@ -2692,7 +3066,7 @@ declare double @llvm.fma.f64(double, double, double) define <2 x float> @fmls_with_fneg_before_extract_v2f32(<2 x float> %accum, <2 x float> %lhs, <4 x float> %rhs) { ; CHECK-LABEL: fmls_with_fneg_before_extract_v2f32: ; CHECK: // %bb.0: -; CHECK-NEXT: fmls.2s v0, v1, v2[3] +; CHECK-NEXT: fmls v0.2s, v1.2s, v2.s[3] ; CHECK-NEXT: ret %rhs_neg = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %rhs %splat = shufflevector <4 x float> %rhs_neg, <4 x float> undef, <2 x i32> <i32 3, i32 3> @@ -2704,7 +3078,7 @@ define <2 x float> @fmls_with_fneg_before_extract_v2f32_1(<2 x float> %accum, <2 ; CHECK-LABEL: fmls_with_fneg_before_extract_v2f32_1: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: fmls.2s v0, v1, v2[1] +; CHECK-NEXT: fmls v0.2s, v1.2s, v2.s[1] ; CHECK-NEXT: ret %rhs_neg = fsub <2 x float> <float -0.0, float -0.0>, %rhs %splat = shufflevector <2 x float> %rhs_neg, <2 x float> undef, <2 x i32> <i32 1, i32 1> @@ -2715,7 +3089,7 @@ define <2 x float> @fmls_with_fneg_before_extract_v2f32_1(<2 x float> %accum, <2 define <4 x float> @fmls_with_fneg_before_extract_v4f32(<4 x float> %accum, <4 x float> %lhs, <4 x float> %rhs) { ; CHECK-LABEL: fmls_with_fneg_before_extract_v4f32: ; CHECK: // %bb.0: -; CHECK-NEXT: fmls.4s v0, v1, v2[3] +; CHECK-NEXT: fmls v0.4s, v1.4s, v2.s[3] ; CHECK-NEXT: ret %rhs_neg = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %rhs %splat = shufflevector <4 x float> %rhs_neg, <4 x float> undef, <4 x i32> <i32 3, i32 3, 
i32 3, i32 3> @@ -2727,7 +3101,7 @@ define <4 x float> @fmls_with_fneg_before_extract_v4f32_1(<4 x float> %accum, <4 ; CHECK-LABEL: fmls_with_fneg_before_extract_v4f32_1: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: fmls.4s v0, v1, v2[1] +; CHECK-NEXT: fmls v0.4s, v1.4s, v2.s[1] ; CHECK-NEXT: ret %rhs_neg = fsub <2 x float> <float -0.0, float -0.0>, %rhs %splat = shufflevector <2 x float> %rhs_neg, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> @@ -2738,7 +3112,7 @@ define <4 x float> @fmls_with_fneg_before_extract_v4f32_1(<4 x float> %accum, <4 define <2 x double> @fmls_with_fneg_before_extract_v2f64(<2 x double> %accum, <2 x double> %lhs, <2 x double> %rhs) { ; CHECK-LABEL: fmls_with_fneg_before_extract_v2f64: ; CHECK: // %bb.0: -; CHECK-NEXT: fmls.2d v0, v1, v2[1] +; CHECK-NEXT: fmls v0.2d, v1.2d, v2.d[1] ; CHECK-NEXT: ret %rhs_neg = fsub <2 x double> <double -0.0, double -0.0>, %rhs %splat = shufflevector <2 x double> %rhs_neg, <2 x double> undef, <2 x i32> <i32 1, i32 1> @@ -2770,7 +3144,7 @@ define i32 @sqdmlal_s(i16 %A, i16 %B, i32 %C) nounwind { ; CHECK-NEXT: fmov s0, w0 ; CHECK-NEXT: fmov s1, w1 ; CHECK-NEXT: fmov s2, w2 -; CHECK-NEXT: sqdmlal.h s2, h0, v1[0] +; CHECK-NEXT: sqdmlal s2, h0, v1.h[0] ; CHECK-NEXT: fmov w0, s2 ; CHECK-NEXT: ret %tmp1 = insertelement <4 x i16> undef, i16 %A, i64 0 @@ -2801,7 +3175,7 @@ define i32 @sqdmlsl_s(i16 %A, i16 %B, i32 %C) nounwind { ; CHECK-NEXT: fmov s0, w0 ; CHECK-NEXT: fmov s1, w1 ; CHECK-NEXT: fmov s2, w2 -; CHECK-NEXT: sqdmlsl.h s2, h0, v1[0] +; CHECK-NEXT: sqdmlsl s2, h0, v1.h[0] ; CHECK-NEXT: fmov w0, s2 ; CHECK-NEXT: ret %tmp1 = insertelement <4 x i16> undef, i16 %A, i64 0 @@ -2831,7 +3205,7 @@ define <16 x i8> @test_pmull_64(i64 %l, i64 %r) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: fmov d0, x1 ; CHECK-NEXT: fmov d1, x0 -; CHECK-NEXT: pmull.1q v0, v1, v0 +; CHECK-NEXT: pmull v0.1q, v1.1d, v0.1d ; CHECK-NEXT: ret %val = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %l, i64 %r) ret <16 x i8> %val @@ -2840,7 +3214,7 @@ define <16 x i8> @test_pmull_64(i64 %l, i64 %r) nounwind { define <16 x i8> @test_pmull_high_64(<2 x i64> %l, <2 x i64> %r) nounwind { ; CHECK-LABEL: test_pmull_high_64: ; CHECK: // %bb.0: -; CHECK-NEXT: pmull2.1q v0, v0, v1 +; CHECK-NEXT: pmull2 v0.1q, v0.2d, v1.2d ; CHECK-NEXT: ret %l_hi = extractelement <2 x i64> %l, i32 1 %r_hi = extractelement <2 x i64> %r, i32 1 @@ -2851,15 +3225,23 @@ define <16 x i8> @test_pmull_high_64(<2 x i64> %l, <2 x i64> %r) nounwind { declare <16 x i8> @llvm.aarch64.neon.pmull64(i64, i64) define <1 x i64> @test_mul_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) nounwind { -; CHECK-LABEL: test_mul_v1i64: -; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1 -; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: fmov x8, d1 -; CHECK-NEXT: fmov x9, d0 -; CHECK-NEXT: mul x8, x9, x8 -; CHECK-NEXT: fmov d0, x8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_mul_v1i64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: // kill: def $d1 killed $d1 def $q1 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-NEXT: fmov x8, d1 +; CHECK-SD-NEXT: fmov x9, d0 +; CHECK-SD-NEXT: mul x8, x9, x8 +; CHECK-SD-NEXT: fmov d0, x8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_mul_v1i64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: fmov x9, d1 +; CHECK-GI-NEXT: mul x8, x8, x9 +; CHECK-GI-NEXT: fmov d0, x8 +; CHECK-GI-NEXT: ret %prod = mul <1 x i64> %lhs, %rhs ret <1 x i64> %prod } @@ -2867,7 +3249,7 @@ define <1 x i64> 
@test_mul_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) nounwind { define <4 x i32> @sqdmlal4s_lib(<4 x i32> %dst, <4 x i16> %v1, <4 x i16> %v2) { ; CHECK-LABEL: sqdmlal4s_lib: ; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlal.4s v0, v1, v2 +; CHECK-NEXT: sqdmlal v0.4s, v1.4h, v2.4h ; CHECK-NEXT: ret %tmp = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %v1, <4 x i16> %v2) %sum = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %dst, <4 x i32> %tmp) @@ -2877,7 +3259,7 @@ define <4 x i32> @sqdmlal4s_lib(<4 x i32> %dst, <4 x i16> %v1, <4 x i16> %v2) { define <2 x i64> @sqdmlal2d_lib(<2 x i64> %dst, <2 x i32> %v1, <2 x i32> %v2) { ; CHECK-LABEL: sqdmlal2d_lib: ; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlal.2d v0, v1, v2 +; CHECK-NEXT: sqdmlal v0.2d, v1.2s, v2.2s ; CHECK-NEXT: ret %tmp = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %v1, <2 x i32> %v2) %sum = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %dst, <2 x i64> %tmp) @@ -2887,7 +3269,7 @@ define <2 x i64> @sqdmlal2d_lib(<2 x i64> %dst, <2 x i32> %v1, <2 x i32> %v2) { define <4 x i32> @sqdmlal2_4s_lib(<4 x i32> %dst, <8 x i16> %v1, <8 x i16> %v2) { ; CHECK-LABEL: sqdmlal2_4s_lib: ; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlal2.4s v0, v1, v2 +; CHECK-NEXT: sqdmlal2 v0.4s, v1.8h, v2.8h ; CHECK-NEXT: ret %tmp0 = shufflevector <8 x i16> %v1, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7> %tmp1 = shufflevector <8 x i16> %v2, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7> @@ -2899,7 +3281,7 @@ define <4 x i32> @sqdmlal2_4s_lib(<4 x i32> %dst, <8 x i16> %v1, <8 x i16> %v2) define <2 x i64> @sqdmlal2_2d_lib(<2 x i64> %dst, <4 x i32> %v1, <4 x i32> %v2) { ; CHECK-LABEL: sqdmlal2_2d_lib: ; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlal2.2d v0, v1, v2 +; CHECK-NEXT: sqdmlal2 v0.2d, v1.4s, v2.4s ; CHECK-NEXT: ret %tmp0 = shufflevector <4 x i32> %v1, <4 x i32> poison, <2 x i32> <i32 2, i32 3> %tmp1 = shufflevector <4 x i32> %v2, <4 x i32> poison, <2 x i32> <i32 2, i32 3> @@ -2912,7 +3294,7 @@ define <4 x i32> @sqdmlal_lane_4s_lib(<4 x i32> %dst, <4 x i16> %v1, <4 x i16> % ; CHECK-LABEL: sqdmlal_lane_4s_lib: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: sqdmlal.4s v0, v1, v2[3] +; CHECK-NEXT: sqdmlal v0.4s, v1.4h, v2.h[3] ; CHECK-NEXT: ret %tmp0 = shufflevector <4 x i16> %v2, <4 x i16> poison, <4 x i32> <i32 3, i32 3, i32 3, i32 3> %tmp1 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %v1, <4 x i16> %tmp0) @@ -2924,7 +3306,7 @@ define <2 x i64> @sqdmlal_lane_2d_lib(<2 x i64> %dst, <2 x i32> %v1, <2 x i32> % ; CHECK-LABEL: sqdmlal_lane_2d_lib: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: sqdmlal.2d v0, v1, v2[1] +; CHECK-NEXT: sqdmlal v0.2d, v1.2s, v2.s[1] ; CHECK-NEXT: ret %tmp0 = shufflevector <2 x i32> %v2, <2 x i32> poison, <2 x i32> <i32 1, i32 1> %tmp1 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %v1, <2 x i32> %tmp0) @@ -2933,10 +3315,16 @@ define <2 x i64> @sqdmlal_lane_2d_lib(<2 x i64> %dst, <2 x i32> %v1, <2 x i32> % } define <4 x i32> @sqdmlal2_lane_4s_lib(<4 x i32> %dst, <8 x i16> %v1, <8 x i16> %v2) { -; CHECK-LABEL: sqdmlal2_lane_4s_lib: -; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlal2.4s v0, v1, v2[7] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmlal2_lane_4s_lib: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sqdmlal2 v0.4s, v1.8h, v2.h[7] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmlal2_lane_4s_lib: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d1, v1.d[1] +; CHECK-GI-NEXT: sqdmlal v0.4s, v1.4h, v2.h[7] +; CHECK-GI-NEXT: ret %tmp0 = 
shufflevector <8 x i16> %v1, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7> %tmp1 = shufflevector <8 x i16> %v2, <8 x i16> poison, <4 x i32> <i32 7, i32 7, i32 7, i32 7> %tmp2 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp0, <4 x i16> %tmp1) @@ -2945,10 +3333,16 @@ define <4 x i32> @sqdmlal2_lane_4s_lib(<4 x i32> %dst, <8 x i16> %v1, <8 x i16> } define <2 x i64> @sqdmlal2_lane_2d_lib(<2 x i64> %dst, <4 x i32> %v1, <4 x i32> %v2) { -; CHECK-LABEL: sqdmlal2_lane_2d_lib: -; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlal2.2d v0, v1, v2[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmlal2_lane_2d_lib: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sqdmlal2 v0.2d, v1.4s, v2.s[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmlal2_lane_2d_lib: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d1, v1.d[1] +; CHECK-GI-NEXT: sqdmlal v0.2d, v1.2s, v2.s[1] +; CHECK-GI-NEXT: ret %tmp0 = shufflevector <4 x i32> %v1, <4 x i32> poison, <2 x i32> <i32 2, i32 3> %tmp1 = shufflevector <4 x i32> %v2, <4 x i32> poison, <2 x i32> <i32 1, i32 1> %tmp2 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp0, <2 x i32> %tmp1) @@ -2959,7 +3353,7 @@ define <2 x i64> @sqdmlal2_lane_2d_lib(<2 x i64> %dst, <4 x i32> %v1, <4 x i32> define <4 x i32> @sqdmlsl4s_lib(<4 x i32> %dst, <4 x i16> %v1, <4 x i16> %v2) { ; CHECK-LABEL: sqdmlsl4s_lib: ; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlsl.4s v0, v1, v2 +; CHECK-NEXT: sqdmlsl v0.4s, v1.4h, v2.4h ; CHECK-NEXT: ret %tmp = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %v1, <4 x i16> %v2) %sum = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %dst, <4 x i32> %tmp) @@ -2969,7 +3363,7 @@ define <4 x i32> @sqdmlsl4s_lib(<4 x i32> %dst, <4 x i16> %v1, <4 x i16> %v2) { define <2 x i64> @sqdmlsl2d_lib(<2 x i64> %dst, <2 x i32> %v1, <2 x i32> %v2) { ; CHECK-LABEL: sqdmlsl2d_lib: ; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlsl.2d v0, v1, v2 +; CHECK-NEXT: sqdmlsl v0.2d, v1.2s, v2.2s ; CHECK-NEXT: ret %tmp = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %v1, <2 x i32> %v2) %sum = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %dst, <2 x i64> %tmp) @@ -2979,7 +3373,7 @@ define <2 x i64> @sqdmlsl2d_lib(<2 x i64> %dst, <2 x i32> %v1, <2 x i32> %v2) { define <4 x i32> @sqdmlsl2_4s_lib(<4 x i32> %dst, <8 x i16> %v1, <8 x i16> %v2) { ; CHECK-LABEL: sqdmlsl2_4s_lib: ; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlsl2.4s v0, v1, v2 +; CHECK-NEXT: sqdmlsl2 v0.4s, v1.8h, v2.8h ; CHECK-NEXT: ret %tmp0 = shufflevector <8 x i16> %v1, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7> %tmp1 = shufflevector <8 x i16> %v2, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7> @@ -2991,7 +3385,7 @@ define <4 x i32> @sqdmlsl2_4s_lib(<4 x i32> %dst, <8 x i16> %v1, <8 x i16> %v2) define <2 x i64> @sqdmlsl2_2d_lib(<2 x i64> %dst, <4 x i32> %v1, <4 x i32> %v2) { ; CHECK-LABEL: sqdmlsl2_2d_lib: ; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlsl2.2d v0, v1, v2 +; CHECK-NEXT: sqdmlsl2 v0.2d, v1.4s, v2.4s ; CHECK-NEXT: ret %tmp0 = shufflevector <4 x i32> %v1, <4 x i32> poison, <2 x i32> <i32 2, i32 3> %tmp1 = shufflevector <4 x i32> %v2, <4 x i32> poison, <2 x i32> <i32 2, i32 3> @@ -3004,7 +3398,7 @@ define <4 x i32> @sqdmlsl_lane_4s_lib(<4 x i32> %dst, <4 x i16> %v1, <4 x i16> % ; CHECK-LABEL: sqdmlsl_lane_4s_lib: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: sqdmlsl.4s v0, v1, v2[3] +; CHECK-NEXT: sqdmlsl v0.4s, v1.4h, v2.h[3] ; CHECK-NEXT: ret %tmp0 = shufflevector <4 x i16> %v2, <4 x i16> poison, <4 x i32> <i32 3, i32 3, i32 3, i32 3> %tmp1 = call <4 
x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %v1, <4 x i16> %tmp0) @@ -3016,7 +3410,7 @@ define <2 x i64> @sqdmlsl_lane_2d_lib(<2 x i64> %dst, <2 x i32> %v1, <2 x i32> % ; CHECK-LABEL: sqdmlsl_lane_2d_lib: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: sqdmlsl.2d v0, v1, v2[1] +; CHECK-NEXT: sqdmlsl v0.2d, v1.2s, v2.s[1] ; CHECK-NEXT: ret %tmp0 = shufflevector <2 x i32> %v2, <2 x i32> poison, <2 x i32> <i32 1, i32 1> %tmp1 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %v1, <2 x i32> %tmp0) @@ -3025,10 +3419,16 @@ define <2 x i64> @sqdmlsl_lane_2d_lib(<2 x i64> %dst, <2 x i32> %v1, <2 x i32> % } define <4 x i32> @sqdmlsl2_lane_4s_lib(<4 x i32> %dst, <8 x i16> %v1, <8 x i16> %v2) { -; CHECK-LABEL: sqdmlsl2_lane_4s_lib: -; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlsl2.4s v0, v1, v2[7] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmlsl2_lane_4s_lib: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sqdmlsl2 v0.4s, v1.8h, v2.h[7] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmlsl2_lane_4s_lib: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d1, v1.d[1] +; CHECK-GI-NEXT: sqdmlsl v0.4s, v1.4h, v2.h[7] +; CHECK-GI-NEXT: ret %tmp0 = shufflevector <8 x i16> %v1, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7> %tmp1 = shufflevector <8 x i16> %v2, <8 x i16> poison, <4 x i32> <i32 7, i32 7, i32 7, i32 7> %tmp2 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp0, <4 x i16> %tmp1) @@ -3037,10 +3437,16 @@ define <4 x i32> @sqdmlsl2_lane_4s_lib(<4 x i32> %dst, <8 x i16> %v1, <8 x i16> } define <2 x i64> @sqdmlsl2_lane_2d_lib(<2 x i64> %dst, <4 x i32> %v1, <4 x i32> %v2) { -; CHECK-LABEL: sqdmlsl2_lane_2d_lib: -; CHECK: // %bb.0: -; CHECK-NEXT: sqdmlsl2.2d v0, v1, v2[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sqdmlsl2_lane_2d_lib: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sqdmlsl2 v0.2d, v1.4s, v2.s[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sqdmlsl2_lane_2d_lib: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov d1, v1.d[1] +; CHECK-GI-NEXT: sqdmlsl v0.2d, v1.2s, v2.s[1] +; CHECK-GI-NEXT: ret %tmp0 = shufflevector <4 x i32> %v1, <4 x i32> poison, <2 x i32> <i32 2, i32 3> %tmp1 = shufflevector <4 x i32> %v2, <4 x i32> poison, <2 x i32> <i32 1, i32 1> %tmp2 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp0, <2 x i32> %tmp1) diff --git a/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll b/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll index 634d1b9..5f5b27a 100644 --- a/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll +++ b/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll @@ -59,37 +59,33 @@ bb27: ; preds = %bb9, %bb8 define void @avoid_promotion_2_and(ptr nocapture noundef %arg) { ; CHECK-LABEL: avoid_promotion_2_and: ; CHECK: ; %bb.0: ; %entry -; CHECK-NEXT: add x8, x0, #32 -; CHECK-NEXT: b LBB1_2 -; CHECK-NEXT: LBB1_1: ; %latch -; CHECK-NEXT: ; in Loop: Header=BB1_2 Depth=1 -; CHECK-NEXT: cmp w9, #2 -; CHECK-NEXT: add x8, x8, #56 -; CHECK-NEXT: b.ls LBB1_4 -; CHECK-NEXT: LBB1_2: ; %loop +; CHECK-NEXT: mov x8, xzr +; CHECK-NEXT: add x9, x0, #32 +; CHECK-NEXT: LBB1_1: ; %loop ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: ldr w9, [x8, #20] -; CHECK-NEXT: cmp w9, #3 -; CHECK-NEXT: b.lo LBB1_1 -; CHECK-NEXT: ; %bb.3: ; %then -; CHECK-NEXT: ; in Loop: Header=BB1_2 Depth=1 -; CHECK-NEXT: ldp w13, w12, [x8, #12] -; CHECK-NEXT: ldr w10, [x8] +; CHECK-NEXT: ldr w10, [x9, #20] +; CHECK-NEXT: cmp w10, #3 +; CHECK-NEXT: b.lo LBB1_3 +; CHECK-NEXT: ; %bb.2: ; %then +; CHECK-NEXT: ; in Loop: Header=BB1_1 Depth=1 +; 
CHECK-NEXT: ldp w13, w12, [x9, #12] +; CHECK-NEXT: ldr w10, [x9] ; CHECK-NEXT: ldr x11, [x0] -; CHECK-NEXT: ldr w14, [x8, #8] +; CHECK-NEXT: add x8, x8, #1 +; CHECK-NEXT: ldr w14, [x9, #8] ; CHECK-NEXT: lsl w10, w10, w13 ; CHECK-NEXT: ldrb w11, [x11, x12] ; CHECK-NEXT: eor w10, w10, w11 -; CHECK-NEXT: ldur w11, [x8, #-24] +; CHECK-NEXT: ldur w11, [x9, #-24] ; CHECK-NEXT: and w10, w10, w14 -; CHECK-NEXT: ldp x14, x13, [x8, #-16] -; CHECK-NEXT: str w10, [x8] +; CHECK-NEXT: ldp x14, x13, [x9, #-16] +; CHECK-NEXT: str w10, [x9], #56 ; CHECK-NEXT: and w11, w11, w12 ; CHECK-NEXT: ldrh w15, [x13, w10, uxtw #1] ; CHECK-NEXT: strh w15, [x14, w11, uxtw #1] ; CHECK-NEXT: strh w12, [x13, w10, uxtw #1] ; CHECK-NEXT: b LBB1_1 -; CHECK-NEXT: LBB1_4: ; %exit +; CHECK-NEXT: LBB1_3: ; %exit.critedge ; CHECK-NEXT: ret entry: br label %loop diff --git a/llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll new file mode 100644 index 0000000..c4c54175 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll @@ -0,0 +1,32 @@ +;; Tests that callee_type metadata attached to direct call sites are safely ignored. + +; RUN: llc --call-graph-section -mtriple aarch64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s + +;; Test that `calleeTypeIds` field is not present in `callSites` +; CHECK-LABEL: callSites: +; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] } +; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] } +; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] } +define i32 @foo(i32 %x, i32 %y) !type !0 { +entry: + ;; Call instruction with accurate callee_type. + ;; callee_type should be dropped seamlessly. + %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1 + ;; Call instruction with mismatched callee_type. + ;; callee_type should be dropped seamlessly without errors. + %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3 + %add = add nsw i32 %call, %call1 + ;; Call instruction with mismatched callee_type. + ;; callee_type should be dropped seamlessly without errors. + %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3 + %sub = sub nsw i32 %add, %call2 + ret i32 %sub +} + +declare !type !2 i32 @fizz(i32, i32) + +!0 = !{i64 0, !"_ZTSFiiiiE.generalized"} +!1 = !{!2} +!2 = !{i64 0, !"_ZTSFiiiE.generalized"} +!3 = !{!4} +!4 = !{i64 0, !"_ZTSFicE.generalized"} diff --git a/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll new file mode 100644 index 0000000..b47607e --- /dev/null +++ b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll @@ -0,0 +1,19 @@ +;; Tests that call site callee type ids can be extracted and set from +;; callee_type metadata for indirect tail calls. + +;; Verify the exact calleeTypeId value to ensure it is not garbage but the value +;; computed as the type id from the callee_type metadata. 
+; RUN: llc --call-graph-section -mtriple aarch64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s + +define i32 @check_tailcall(ptr %func, i8 %x) !type !0 { +entry: + ; CHECK: callSites: + ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds: + ; CHECK-NEXT: [ 3498816979441845844 ] } + %call = tail call i32 %func(i8 signext %x), !callee_type !1 + ret i32 %call +} + +!0 = !{i64 0, !"_ZTSFiPvcE.generalized"} +!1 = !{!2} +!2 = !{i64 0, !"_ZTSFicE.generalized"} diff --git a/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll new file mode 100644 index 0000000..94b657c --- /dev/null +++ b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll @@ -0,0 +1,20 @@ +;; Tests that call site callee type ids can be extracted and set from +;; callee_type metadata. + +;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value +;; computed as the type id from the callee_type metadata. +; RUN: llc --call-graph-section -mtriple aarch64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s + +; CHECK: name: main +; CHECK: callSites: +; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds: +; CHECK-NEXT: [ 7854600665770582568 ] } +define i32 @main() { +entry: + %fn = load ptr, ptr null, align 8 + call void %fn(i8 0), !callee_type !0 + ret i32 0 +} + +!0 = !{!1} +!1 = !{i64 0, !"_ZTSFvcE.generalized"} diff --git a/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll b/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll index 5765e0a..b3ce9d2 100644 --- a/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll +++ b/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll @@ -1,14 +1,21 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s +; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" target triple = "arm64" define i1 @test_EQ_IllEbT(i64 %a, i64 %b) { -; CHECK-LABEL: test_EQ_IllEbT: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmn x0, x1 -; CHECK-NEXT: cset w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_EQ_IllEbT: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: cmn x0, x1 +; CHECK-SD-NEXT: cset w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_EQ_IllEbT: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: cmn x1, x0 +; CHECK-GI-NEXT: cset w0, eq +; CHECK-GI-NEXT: ret entry: %add = sub i64 0, %b %cmp = icmp eq i64 %add, %a @@ -16,11 +23,19 @@ entry: } define i1 @test_EQ_IliEbT(i64 %a, i32 %b) { -; CHECK-LABEL: test_EQ_IliEbT: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmn x0, w1, sxtw -; CHECK-NEXT: cset w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_EQ_IliEbT: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: cmn x0, w1, sxtw +; CHECK-SD-NEXT: cset w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_EQ_IliEbT: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1 +; CHECK-GI-NEXT: sxtw x8, w1 +; CHECK-GI-NEXT: cmn x8, x0 +; CHECK-GI-NEXT: cset w0, eq +; CHECK-GI-NEXT: ret entry: %conv = sext i32 %b to i64 %add = sub i64 0, %a @@ -55,11 +70,19 @@ entry: } define i1 @test_EQ_IilEbT(i32 %a, i64 %b) { -; CHECK-LABEL: test_EQ_IilEbT: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmn x1, 
w0, sxtw -; CHECK-NEXT: cset w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_EQ_IilEbT: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: cmn x1, w0, sxtw +; CHECK-SD-NEXT: cset w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_EQ_IilEbT: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0 +; CHECK-GI-NEXT: sxtw x8, w0 +; CHECK-GI-NEXT: cmn x8, x1 +; CHECK-GI-NEXT: cset w0, eq +; CHECK-GI-NEXT: ret entry: %conv = sext i32 %a to i64 %add = sub i64 0, %b @@ -68,11 +91,17 @@ entry: } define i1 @test_EQ_IiiEbT(i32 %a, i32 %b) { -; CHECK-LABEL: test_EQ_IiiEbT: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmn w0, w1 -; CHECK-NEXT: cset w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_EQ_IiiEbT: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: cmn w0, w1 +; CHECK-SD-NEXT: cset w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_EQ_IiiEbT: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: cmn w1, w0 +; CHECK-GI-NEXT: cset w0, eq +; CHECK-GI-NEXT: ret entry: %add = sub i32 0, %b %cmp = icmp eq i32 %add, %a @@ -218,11 +247,17 @@ entry: } define i1 @test_NE_IllEbT(i64 %a, i64 %b) { -; CHECK-LABEL: test_NE_IllEbT: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmn x0, x1 -; CHECK-NEXT: cset w0, ne -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_NE_IllEbT: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: cmn x0, x1 +; CHECK-SD-NEXT: cset w0, ne +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_NE_IllEbT: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: cmn x1, x0 +; CHECK-GI-NEXT: cset w0, ne +; CHECK-GI-NEXT: ret entry: %add = sub i64 0, %b %cmp = icmp ne i64 %add, %a @@ -230,11 +265,19 @@ entry: } define i1 @test_NE_IliEbT(i64 %a, i32 %b) { -; CHECK-LABEL: test_NE_IliEbT: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmn x0, w1, sxtw -; CHECK-NEXT: cset w0, ne -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_NE_IliEbT: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: cmn x0, w1, sxtw +; CHECK-SD-NEXT: cset w0, ne +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_NE_IliEbT: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1 +; CHECK-GI-NEXT: sxtw x8, w1 +; CHECK-GI-NEXT: cmn x8, x0 +; CHECK-GI-NEXT: cset w0, ne +; CHECK-GI-NEXT: ret entry: %conv = sext i32 %b to i64 %add = sub i64 0, %a @@ -269,11 +312,19 @@ entry: } define i1 @test_NE_IilEbT(i32 %a, i64 %b) { -; CHECK-LABEL: test_NE_IilEbT: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmn x1, w0, sxtw -; CHECK-NEXT: cset w0, ne -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_NE_IilEbT: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: cmn x1, w0, sxtw +; CHECK-SD-NEXT: cset w0, ne +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_NE_IilEbT: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0 +; CHECK-GI-NEXT: sxtw x8, w0 +; CHECK-GI-NEXT: cmn x8, x1 +; CHECK-GI-NEXT: cset w0, ne +; CHECK-GI-NEXT: ret entry: %conv = sext i32 %a to i64 %add = sub i64 0, %b @@ -282,11 +333,17 @@ entry: } define i1 @test_NE_IiiEbT(i32 %a, i32 %b) { -; CHECK-LABEL: test_NE_IiiEbT: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: cmn w0, w1 -; CHECK-NEXT: cset w0, ne -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_NE_IiiEbT: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: cmn w0, w1 +; CHECK-SD-NEXT: cset w0, ne +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_NE_IiiEbT: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: cmn w1, w0 +; CHECK-GI-NEXT: cset w0, ne +; CHECK-GI-NEXT: ret entry: %add = sub i32 0, %b %cmp = icmp ne i32 %add, %a @@ -444,161 +501,281 @@ define i1 
@cmn_large_imm(i32 %a) { } define i1 @almost_immediate_neg_slt(i32 %x) { -; CHECK-LABEL: almost_immediate_neg_slt: -; CHECK: // %bb.0: -; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584 -; CHECK-NEXT: cset w0, le -; CHECK-NEXT: ret +; CHECK-SD-LABEL: almost_immediate_neg_slt: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584 +; CHECK-SD-NEXT: cset w0, le +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: almost_immediate_neg_slt: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #4097 // =0x1001 +; CHECK-GI-NEXT: movk w8, #65281, lsl #16 +; CHECK-GI-NEXT: cmp w0, w8 +; CHECK-GI-NEXT: cset w0, lt +; CHECK-GI-NEXT: ret %cmp = icmp slt i32 %x, -16707583 ret i1 %cmp } define i1 @almost_immediate_neg_slt_64(i64 %x) { -; CHECK-LABEL: almost_immediate_neg_slt_64: -; CHECK: // %bb.0: -; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584 -; CHECK-NEXT: cset w0, le -; CHECK-NEXT: ret +; CHECK-SD-LABEL: almost_immediate_neg_slt_64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584 +; CHECK-SD-NEXT: cset w0, le +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: almost_immediate_neg_slt_64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001 +; CHECK-GI-NEXT: movk x8, #65281, lsl #16 +; CHECK-GI-NEXT: cmp x0, x8 +; CHECK-GI-NEXT: cset w0, lt +; CHECK-GI-NEXT: ret %cmp = icmp slt i64 %x, -16707583 ret i1 %cmp } define i1 @almost_immediate_neg_sge(i32 %x) { -; CHECK-LABEL: almost_immediate_neg_sge: -; CHECK: // %bb.0: -; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584 -; CHECK-NEXT: cset w0, gt -; CHECK-NEXT: ret +; CHECK-SD-LABEL: almost_immediate_neg_sge: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584 +; CHECK-SD-NEXT: cset w0, gt +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: almost_immediate_neg_sge: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #4097 // =0x1001 +; CHECK-GI-NEXT: movk w8, #65281, lsl #16 +; CHECK-GI-NEXT: cmp w0, w8 +; CHECK-GI-NEXT: cset w0, ge +; CHECK-GI-NEXT: ret %cmp = icmp sge i32 %x, -16707583 ret i1 %cmp } define i1 @almost_immediate_neg_sge_64(i64 %x) { -; CHECK-LABEL: almost_immediate_neg_sge_64: -; CHECK: // %bb.0: -; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584 -; CHECK-NEXT: cset w0, gt -; CHECK-NEXT: ret +; CHECK-SD-LABEL: almost_immediate_neg_sge_64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584 +; CHECK-SD-NEXT: cset w0, gt +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: almost_immediate_neg_sge_64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001 +; CHECK-GI-NEXT: movk x8, #65281, lsl #16 +; CHECK-GI-NEXT: cmp x0, x8 +; CHECK-GI-NEXT: cset w0, ge +; CHECK-GI-NEXT: ret %cmp = icmp sge i64 %x, -16707583 ret i1 %cmp } define i1 @almost_immediate_neg_uge(i32 %x) { -; CHECK-LABEL: almost_immediate_neg_uge: -; CHECK: // %bb.0: -; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584 -; CHECK-NEXT: cset w0, hi -; CHECK-NEXT: ret +; CHECK-SD-LABEL: almost_immediate_neg_uge: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584 +; CHECK-SD-NEXT: cset w0, hi +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: almost_immediate_neg_uge: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #4097 // =0x1001 +; CHECK-GI-NEXT: movk w8, #65281, lsl #16 +; CHECK-GI-NEXT: cmp w0, w8 +; CHECK-GI-NEXT: cset w0, hs +; CHECK-GI-NEXT: ret %cmp = icmp uge i32 %x, -16707583 ret i1 %cmp } define i1 @almost_immediate_neg_uge_64(i64 %x) { -; CHECK-LABEL: almost_immediate_neg_uge_64: -; CHECK: // %bb.0: -; CHECK-NEXT: cmn x0, #4079, lsl 
#12 // =16707584 -; CHECK-NEXT: cset w0, hi -; CHECK-NEXT: ret +; CHECK-SD-LABEL: almost_immediate_neg_uge_64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584 +; CHECK-SD-NEXT: cset w0, hi +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: almost_immediate_neg_uge_64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001 +; CHECK-GI-NEXT: movk x8, #65281, lsl #16 +; CHECK-GI-NEXT: cmp x0, x8 +; CHECK-GI-NEXT: cset w0, hs +; CHECK-GI-NEXT: ret %cmp = icmp uge i64 %x, -16707583 ret i1 %cmp } define i1 @almost_immediate_neg_ult(i32 %x) { -; CHECK-LABEL: almost_immediate_neg_ult: -; CHECK: // %bb.0: -; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584 -; CHECK-NEXT: cset w0, ls -; CHECK-NEXT: ret +; CHECK-SD-LABEL: almost_immediate_neg_ult: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584 +; CHECK-SD-NEXT: cset w0, ls +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: almost_immediate_neg_ult: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #4097 // =0x1001 +; CHECK-GI-NEXT: movk w8, #65281, lsl #16 +; CHECK-GI-NEXT: cmp w0, w8 +; CHECK-GI-NEXT: cset w0, lo +; CHECK-GI-NEXT: ret %cmp = icmp ult i32 %x, -16707583 ret i1 %cmp } define i1 @almost_immediate_neg_ult_64(i64 %x) { -; CHECK-LABEL: almost_immediate_neg_ult_64: -; CHECK: // %bb.0: -; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584 -; CHECK-NEXT: cset w0, ls -; CHECK-NEXT: ret +; CHECK-SD-LABEL: almost_immediate_neg_ult_64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584 +; CHECK-SD-NEXT: cset w0, ls +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: almost_immediate_neg_ult_64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001 +; CHECK-GI-NEXT: movk x8, #65281, lsl #16 +; CHECK-GI-NEXT: cmp x0, x8 +; CHECK-GI-NEXT: cset w0, lo +; CHECK-GI-NEXT: ret %cmp = icmp ult i64 %x, -16707583 ret i1 %cmp } define i1 @almost_immediate_neg_sle(i32 %x) { -; CHECK-LABEL: almost_immediate_neg_sle: -; CHECK: // %bb.0: -; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120 -; CHECK-NEXT: cset w0, lt -; CHECK-NEXT: ret +; CHECK-SD-LABEL: almost_immediate_neg_sle: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120 +; CHECK-SD-NEXT: cset w0, lt +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: almost_immediate_neg_sle: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff +; CHECK-GI-NEXT: cmp w0, w8 +; CHECK-GI-NEXT: cset w0, le +; CHECK-GI-NEXT: ret %cmp = icmp sle i32 %x, -16773121 ret i1 %cmp } define i1 @almost_immediate_neg_sle_64(i64 %x) { -; CHECK-LABEL: almost_immediate_neg_sle_64: -; CHECK: // %bb.0: -; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120 -; CHECK-NEXT: cset w0, lt -; CHECK-NEXT: ret +; CHECK-SD-LABEL: almost_immediate_neg_sle_64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120 +; CHECK-SD-NEXT: cset w0, lt +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: almost_immediate_neg_sle_64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff +; CHECK-GI-NEXT: cmp x0, x8 +; CHECK-GI-NEXT: cset w0, le +; CHECK-GI-NEXT: ret %cmp = icmp sle i64 %x, -16773121 ret i1 %cmp } define i1 @almost_immediate_neg_sgt(i32 %x) { -; CHECK-LABEL: almost_immediate_neg_sgt: -; CHECK: // %bb.0: -; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120 -; CHECK-NEXT: cset w0, ge -; CHECK-NEXT: ret +; CHECK-SD-LABEL: almost_immediate_neg_sgt: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120 +; CHECK-SD-NEXT: cset w0, ge +; 
CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: almost_immediate_neg_sgt: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff +; CHECK-GI-NEXT: cmp w0, w8 +; CHECK-GI-NEXT: cset w0, gt +; CHECK-GI-NEXT: ret %cmp = icmp sgt i32 %x, -16773121 ret i1 %cmp } define i1 @almost_immediate_neg_sgt_64(i64 %x) { -; CHECK-LABEL: almost_immediate_neg_sgt_64: -; CHECK: // %bb.0: -; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120 -; CHECK-NEXT: cset w0, ge -; CHECK-NEXT: ret +; CHECK-SD-LABEL: almost_immediate_neg_sgt_64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120 +; CHECK-SD-NEXT: cset w0, ge +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: almost_immediate_neg_sgt_64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff +; CHECK-GI-NEXT: cmp x0, x8 +; CHECK-GI-NEXT: cset w0, gt +; CHECK-GI-NEXT: ret %cmp = icmp sgt i64 %x, -16773121 ret i1 %cmp } define i1 @almost_immediate_neg_ule(i32 %x) { -; CHECK-LABEL: almost_immediate_neg_ule: -; CHECK: // %bb.0: -; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120 -; CHECK-NEXT: cset w0, lo -; CHECK-NEXT: ret +; CHECK-SD-LABEL: almost_immediate_neg_ule: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120 +; CHECK-SD-NEXT: cset w0, lo +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: almost_immediate_neg_ule: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff +; CHECK-GI-NEXT: cmp w0, w8 +; CHECK-GI-NEXT: cset w0, ls +; CHECK-GI-NEXT: ret %cmp = icmp ule i32 %x, -16773121 ret i1 %cmp } define i1 @almost_immediate_neg_ule_64(i64 %x) { -; CHECK-LABEL: almost_immediate_neg_ule_64: -; CHECK: // %bb.0: -; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120 -; CHECK-NEXT: cset w0, lo -; CHECK-NEXT: ret +; CHECK-SD-LABEL: almost_immediate_neg_ule_64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120 +; CHECK-SD-NEXT: cset w0, lo +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: almost_immediate_neg_ule_64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff +; CHECK-GI-NEXT: cmp x0, x8 +; CHECK-GI-NEXT: cset w0, ls +; CHECK-GI-NEXT: ret %cmp = icmp ule i64 %x, -16773121 ret i1 %cmp } define i1 @almost_immediate_neg_ugt(i32 %x) { -; CHECK-LABEL: almost_immediate_neg_ugt: -; CHECK: // %bb.0: -; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120 -; CHECK-NEXT: cset w0, hs -; CHECK-NEXT: ret +; CHECK-SD-LABEL: almost_immediate_neg_ugt: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120 +; CHECK-SD-NEXT: cset w0, hs +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: almost_immediate_neg_ugt: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff +; CHECK-GI-NEXT: cmp w0, w8 +; CHECK-GI-NEXT: cset w0, hi +; CHECK-GI-NEXT: ret %cmp = icmp ugt i32 %x, -16773121 ret i1 %cmp } define i1 @almost_immediate_neg_ugt_64(i64 %x) { -; CHECK-LABEL: almost_immediate_neg_ugt_64: -; CHECK: // %bb.0: -; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120 -; CHECK-NEXT: cset w0, hs -; CHECK-NEXT: ret +; CHECK-SD-LABEL: almost_immediate_neg_ugt_64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120 +; CHECK-SD-NEXT: cset w0, hs +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: almost_immediate_neg_ugt_64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff +; CHECK-GI-NEXT: cmp x0, x8 +; CHECK-GI-NEXT: cset w0, hi +; CHECK-GI-NEXT: ret %cmp = icmp ugt i64 %x, -16773121 ret i1 %cmp } @@ -637,6 +814,24 @@ define i1 @cmn_nsw_neg(i32 %a, i32 %b) { ret i1 
%cmp } +define i1 @cmn_swap(i32 %a, i32 %b) { +; CHECK-SD-LABEL: cmn_swap: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmn w0, w1 +; CHECK-SD-NEXT: cset w0, lt +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: cmn_swap: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: cmn w1, w0 +; CHECK-GI-NEXT: cset w0, lt +; CHECK-GI-NEXT: ret + %sub = sub nsw i32 0, %b + %cmp = icmp sgt i32 %sub, %a + ret i1 %cmp +} + + define i1 @cmn_nsw_neg_64(i64 %a, i64 %b) { ; CHECK-LABEL: cmn_nsw_neg_64: ; CHECK: // %bb.0: diff --git a/llvm/test/CodeGen/AArch64/combine-and-like.ll b/llvm/test/CodeGen/AArch64/combine-and-like.ll index 15770c2..ea1359b 100644 --- a/llvm/test/CodeGen/AArch64/combine-and-like.ll +++ b/llvm/test/CodeGen/AArch64/combine-and-like.ll @@ -4,7 +4,6 @@ define i32 @f(i32 %a0) { ; CHECK-LABEL: f: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w0, wzr ; CHECK-NEXT: ret %1 = lshr i32 %a0, 2147483647 %2 = add i32 %1, 2147483647 diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll index 880bd29..d67aa08 100644 --- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll +++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll @@ -14,20 +14,19 @@ target triple = "aarch64" define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) { ; CHECK-LABEL: complex_mul_v2f64: ; CHECK: // %bb.0: // %entry +; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: movi v1.2d, #0000000000000000 ; CHECK-NEXT: mov w8, #100 // =0x64 -; CHECK-NEXT: cntd x9 ; CHECK-NEXT: whilelo p1.d, xzr, x8 +; CHECK-NEXT: cntd x9 ; CHECK-NEXT: rdvl x10, #2 -; CHECK-NEXT: mov x11, x9 ; CHECK-NEXT: ptrue p0.d -; CHECK-NEXT: zip2 z0.d, z1.d, z1.d -; CHECK-NEXT: zip1 z1.d, z1.d, z1.d +; CHECK-NEXT: mov x11, x9 ; CHECK-NEXT: .LBB0_1: // %vector.body ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: zip2 p2.d, p1.d, p1.d -; CHECK-NEXT: mov z6.d, z1.d -; CHECK-NEXT: mov z7.d, z0.d +; CHECK-NEXT: mov z6.d, z0.d +; CHECK-NEXT: mov z7.d, z1.d ; CHECK-NEXT: zip1 p1.d, p1.d, p1.d ; CHECK-NEXT: ld1d { z2.d }, p2/z, [x0, #1, mul vl] ; CHECK-NEXT: ld1d { z4.d }, p2/z, [x1, #1, mul vl] @@ -39,14 +38,14 @@ define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) { ; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #0 ; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #90 ; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #90 -; CHECK-NEXT: mov z0.d, p2/m, z7.d -; CHECK-NEXT: mov z1.d, p1/m, z6.d +; CHECK-NEXT: mov z1.d, p2/m, z7.d +; CHECK-NEXT: mov z0.d, p1/m, z6.d ; CHECK-NEXT: whilelo p1.d, x11, x8 ; CHECK-NEXT: add x11, x11, x9 ; CHECK-NEXT: b.mi .LBB0_1 ; CHECK-NEXT: // %bb.2: // %exit.block -; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d -; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d +; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d +; CHECK-NEXT: uzp2 z1.d, z0.d, z1.d ; CHECK-NEXT: faddv d0, p0, z2.d ; CHECK-NEXT: faddv d1, p0, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 @@ -111,21 +110,20 @@ exit.block: ; preds = %vector.body define %"class.std::complex" @complex_mul_predicated_v2f64(ptr %a, ptr %b, ptr %cond) { ; CHECK-LABEL: complex_mul_predicated_v2f64: ; CHECK: // %bb.0: // %entry +; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: movi v1.2d, #0000000000000000 ; CHECK-NEXT: cntd x9 -; CHECK-NEXT: mov w11, #100 // =0x64 ; CHECK-NEXT: neg x10, x9 +; CHECK-NEXT: mov w11, #100 // =0x64 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, xzr ; CHECK-NEXT: and x10, x10, x11 ; CHECK-NEXT: rdvl 
x11, #2 -; CHECK-NEXT: zip2 z0.d, z1.d, z1.d -; CHECK-NEXT: zip1 z1.d, z1.d, z1.d ; CHECK-NEXT: .LBB1_1: // %vector.body ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: ld1w { z2.d }, p0/z, [x2, x8, lsl #2] -; CHECK-NEXT: mov z6.d, z1.d -; CHECK-NEXT: mov z7.d, z0.d +; CHECK-NEXT: mov z6.d, z0.d +; CHECK-NEXT: mov z7.d, z1.d ; CHECK-NEXT: add x8, x8, x9 ; CHECK-NEXT: cmpne p1.d, p0/z, z2.d, #0 ; CHECK-NEXT: cmp x10, x8 @@ -141,12 +139,12 @@ define %"class.std::complex" @complex_mul_predicated_v2f64(ptr %a, ptr %b, ptr % ; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #0 ; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #90 ; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #90 -; CHECK-NEXT: mov z0.d, p2/m, z7.d -; CHECK-NEXT: mov z1.d, p1/m, z6.d +; CHECK-NEXT: mov z1.d, p2/m, z7.d +; CHECK-NEXT: mov z0.d, p1/m, z6.d ; CHECK-NEXT: b.ne .LBB1_1 ; CHECK-NEXT: // %bb.2: // %exit.block -; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d -; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d +; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d +; CHECK-NEXT: uzp2 z1.d, z0.d, z1.d ; CHECK-NEXT: faddv d0, p0, z2.d ; CHECK-NEXT: faddv d1, p0, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 @@ -213,21 +211,20 @@ exit.block: ; preds = %vector.body define %"class.std::complex" @complex_mul_predicated_x2_v2f64(ptr %a, ptr %b, ptr %cond) { ; CHECK-LABEL: complex_mul_predicated_x2_v2f64: ; CHECK: // %bb.0: // %entry +; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: movi v1.2d, #0000000000000000 ; CHECK-NEXT: mov w8, #100 // =0x64 -; CHECK-NEXT: cntd x9 ; CHECK-NEXT: whilelo p1.d, xzr, x8 +; CHECK-NEXT: cntd x9 ; CHECK-NEXT: rdvl x10, #2 -; CHECK-NEXT: cnth x11 ; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: cnth x11 ; CHECK-NEXT: mov x12, x9 -; CHECK-NEXT: zip2 z0.d, z1.d, z1.d -; CHECK-NEXT: zip1 z1.d, z1.d, z1.d ; CHECK-NEXT: .LBB2_1: // %vector.body ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: ld1w { z2.d }, p1/z, [x2] -; CHECK-NEXT: mov z6.d, z1.d -; CHECK-NEXT: mov z7.d, z0.d +; CHECK-NEXT: mov z6.d, z0.d +; CHECK-NEXT: mov z7.d, z1.d ; CHECK-NEXT: add x2, x2, x11 ; CHECK-NEXT: and z2.d, z2.d, #0xffffffff ; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0 @@ -243,14 +240,14 @@ define %"class.std::complex" @complex_mul_predicated_x2_v2f64(ptr %a, ptr %b, pt ; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #0 ; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #90 ; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #90 -; CHECK-NEXT: mov z0.d, p2/m, z7.d -; CHECK-NEXT: mov z1.d, p1/m, z6.d +; CHECK-NEXT: mov z1.d, p2/m, z7.d +; CHECK-NEXT: mov z0.d, p1/m, z6.d ; CHECK-NEXT: whilelo p1.d, x12, x8 ; CHECK-NEXT: add x12, x12, x9 ; CHECK-NEXT: b.mi .LBB2_1 ; CHECK-NEXT: // %bb.2: // %exit.block -; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d -; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d +; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d +; CHECK-NEXT: uzp2 z1.d, z0.d, z1.d ; CHECK-NEXT: faddv d0, p0, z2.d ; CHECK-NEXT: faddv d1, p0, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll index 29be231..0646ca4 100644 --- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll +++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll @@ -14,15 +14,14 @@ target triple = "aarch64" define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) { ; CHECK-LABEL: complex_mul_v2f64: ; CHECK: // %bb.0: // %entry +; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: movi v1.2d, 
#0000000000000000 ; CHECK-NEXT: cntd x8 -; CHECK-NEXT: mov w10, #100 // =0x64 ; CHECK-NEXT: neg x9, x8 +; CHECK-NEXT: mov w10, #100 // =0x64 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: and x9, x9, x10 ; CHECK-NEXT: rdvl x10, #2 -; CHECK-NEXT: zip2 z0.d, z1.d, z1.d -; CHECK-NEXT: zip1 z1.d, z1.d, z1.d ; CHECK-NEXT: .LBB0_1: // %vector.body ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: ldr z2, [x0, #1, mul vl] @@ -32,14 +31,14 @@ define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) { ; CHECK-NEXT: ldr z5, [x1] ; CHECK-NEXT: add x1, x1, x10 ; CHECK-NEXT: add x0, x0, x10 -; CHECK-NEXT: fcmla z1.d, p0/m, z5.d, z3.d, #0 -; CHECK-NEXT: fcmla z0.d, p0/m, z4.d, z2.d, #0 -; CHECK-NEXT: fcmla z1.d, p0/m, z5.d, z3.d, #90 -; CHECK-NEXT: fcmla z0.d, p0/m, z4.d, z2.d, #90 +; CHECK-NEXT: fcmla z0.d, p0/m, z5.d, z3.d, #0 +; CHECK-NEXT: fcmla z1.d, p0/m, z4.d, z2.d, #0 +; CHECK-NEXT: fcmla z0.d, p0/m, z5.d, z3.d, #90 +; CHECK-NEXT: fcmla z1.d, p0/m, z4.d, z2.d, #90 ; CHECK-NEXT: b.ne .LBB0_1 ; CHECK-NEXT: // %bb.2: // %exit.block -; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d -; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d +; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d +; CHECK-NEXT: uzp2 z1.d, z0.d, z1.d ; CHECK-NEXT: faddv d0, p0, z2.d ; CHECK-NEXT: faddv d1, p0, z1.d ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 @@ -183,17 +182,16 @@ exit.block: ; preds = %vector.body define %"class.std::complex" @complex_mul_v2f64_unrolled(ptr %a, ptr %b) { ; CHECK-LABEL: complex_mul_v2f64_unrolled: ; CHECK: // %bb.0: // %entry +; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: movi v1.2d, #0000000000000000 ; CHECK-NEXT: cntw x8 -; CHECK-NEXT: mov w10, #1000 // =0x3e8 +; CHECK-NEXT: movi v2.2d, #0000000000000000 +; CHECK-NEXT: movi v3.2d, #0000000000000000 ; CHECK-NEXT: neg x9, x8 +; CHECK-NEXT: mov w10, #1000 // =0x3e8 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: and x9, x9, x10 ; CHECK-NEXT: rdvl x10, #4 -; CHECK-NEXT: zip2 z0.d, z1.d, z1.d -; CHECK-NEXT: zip1 z1.d, z1.d, z1.d -; CHECK-NEXT: mov z2.d, z1.d -; CHECK-NEXT: mov z3.d, z0.d ; CHECK-NEXT: .LBB2_1: // %vector.body ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: ldr z4, [x0, #1, mul vl] @@ -207,20 +205,20 @@ define %"class.std::complex" @complex_mul_v2f64_unrolled(ptr %a, ptr %b) { ; CHECK-NEXT: ldr z18, [x1, #3, mul vl] ; CHECK-NEXT: ldr z19, [x1, #2, mul vl] ; CHECK-NEXT: add x1, x1, x10 -; CHECK-NEXT: fcmla z1.d, p0/m, z16.d, z5.d, #0 -; CHECK-NEXT: fcmla z0.d, p0/m, z7.d, z4.d, #0 +; CHECK-NEXT: fcmla z0.d, p0/m, z16.d, z5.d, #0 +; CHECK-NEXT: fcmla z1.d, p0/m, z7.d, z4.d, #0 ; CHECK-NEXT: fcmla z3.d, p0/m, z18.d, z6.d, #0 ; CHECK-NEXT: fcmla z2.d, p0/m, z19.d, z17.d, #0 -; CHECK-NEXT: fcmla z1.d, p0/m, z16.d, z5.d, #90 -; CHECK-NEXT: fcmla z0.d, p0/m, z7.d, z4.d, #90 +; CHECK-NEXT: fcmla z0.d, p0/m, z16.d, z5.d, #90 +; CHECK-NEXT: fcmla z1.d, p0/m, z7.d, z4.d, #90 ; CHECK-NEXT: fcmla z3.d, p0/m, z18.d, z6.d, #90 ; CHECK-NEXT: fcmla z2.d, p0/m, z19.d, z17.d, #90 ; CHECK-NEXT: b.ne .LBB2_1 ; CHECK-NEXT: // %bb.2: // %exit.block ; CHECK-NEXT: uzp1 z4.d, z2.d, z3.d -; CHECK-NEXT: uzp1 z5.d, z1.d, z0.d +; CHECK-NEXT: uzp1 z5.d, z0.d, z1.d ; CHECK-NEXT: uzp2 z2.d, z2.d, z3.d -; CHECK-NEXT: uzp2 z0.d, z1.d, z0.d +; CHECK-NEXT: uzp2 z0.d, z0.d, z1.d ; CHECK-NEXT: fadd z1.d, z4.d, z5.d ; CHECK-NEXT: fadd z2.d, z2.d, z0.d ; CHECK-NEXT: faddv d0, p0, z1.d @@ -310,15 +308,15 @@ define dso_local %"class.std::complex" @reduction_mix(ptr %a, ptr %b, ptr noalia ; CHECK-LABEL: reduction_mix: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: movi v2.2d, 
#0000000000000000 +; CHECK-NEXT: movi v0.2d, #0000000000000000 ; CHECK-NEXT: cntd x9 -; CHECK-NEXT: mov w11, #100 // =0x64 +; CHECK-NEXT: movi v1.2d, #0000000000000000 ; CHECK-NEXT: neg x10, x9 +; CHECK-NEXT: mov w11, #100 // =0x64 ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: mov x8, xzr ; CHECK-NEXT: and x10, x10, x11 ; CHECK-NEXT: rdvl x11, #2 -; CHECK-NEXT: zip2 z0.d, z2.d, z2.d -; CHECK-NEXT: zip1 z1.d, z2.d, z2.d ; CHECK-NEXT: .LBB3_1: // %vector.body ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: ldr z3, [x0] @@ -327,13 +325,13 @@ define dso_local %"class.std::complex" @reduction_mix(ptr %a, ptr %b, ptr noalia ; CHECK-NEXT: ld1w { z5.d }, p0/z, [x3, x8, lsl #2] ; CHECK-NEXT: add x8, x8, x9 ; CHECK-NEXT: cmp x10, x8 -; CHECK-NEXT: fadd z0.d, z4.d, z0.d -; CHECK-NEXT: fadd z1.d, z3.d, z1.d +; CHECK-NEXT: fadd z1.d, z4.d, z1.d +; CHECK-NEXT: fadd z0.d, z3.d, z0.d ; CHECK-NEXT: add z2.d, z5.d, z2.d ; CHECK-NEXT: b.ne .LBB3_1 ; CHECK-NEXT: // %bb.2: // %middle.block -; CHECK-NEXT: uzp2 z3.d, z1.d, z0.d -; CHECK-NEXT: uzp1 z1.d, z1.d, z0.d +; CHECK-NEXT: uzp2 z3.d, z0.d, z1.d +; CHECK-NEXT: uzp1 z1.d, z0.d, z1.d ; CHECK-NEXT: uaddv d2, p0, z2.d ; CHECK-NEXT: faddv d0, p0, z3.d ; CHECK-NEXT: faddv d1, p0, z1.d diff --git a/llvm/test/CodeGen/AArch64/constant-pool-partition.ll b/llvm/test/CodeGen/AArch64/constant-pool-partition.ll index d444713..9f4b3e2 100644 --- a/llvm/test/CodeGen/AArch64/constant-pool-partition.ll +++ b/llvm/test/CodeGen/AArch64/constant-pool-partition.ll @@ -19,11 +19,11 @@ ; function, constant pools for this constant should not have `.unlikely` suffix. ;; Constant pools for function @cold_func. -; CHECK: .section .rodata.cst8.hot,"aM",@progbits,8 +; CHECK: .section .rodata.cst8.hot.,"aM",@progbits,8 ; CHECK-NEXT: .p2align ; CHECK-NEXT: .LCPI0_0: ; CHECK-NEXT: .xword 0x3fe5c28f5c28f5c3 // double 0.68000000000000005 -; CHECK-NEXT: .section .rodata.cst8.unlikely,"aM",@progbits,8 +; CHECK-NEXT: .section .rodata.cst8.unlikely.,"aM",@progbits,8 ; CHECK-NEXT: .p2align ; CHECK-NEXT: .LCPI0_1: ; CHECK-NEXT: .xword 0x3fe5eb851eb851ec // double 0.68500000000000005 @@ -58,7 +58,7 @@ ; CHECK-NEXT: .word 3 // 0x3 ; CHECK-NEXT: .word 5 // 0x5 ; CHECK-NEXT: .word 7 // 0x7 -; CHECK-NEXT: .section .rodata.cst16.hot,"aM",@progbits,16 +; CHECK-NEXT: .section .rodata.cst16.hot.,"aM",@progbits,16 ; CHECK-NEXT: .p2align ; CHECK-NEXT: .LCPI1_2: ; CHECK-NEXT: .word 442 // 0x1ba @@ -67,11 +67,11 @@ ; CHECK-NEXT: .word 0 // 0x0 ;; Constant pools for function @hot_func -; CHECK: .section .rodata.cst8.hot,"aM",@progbits,8 +; CHECK: .section .rodata.cst8.hot.,"aM",@progbits,8 ; CHECK-NEXT: .p2align ; CHECK-NEXT: .LCPI2_0: ; CHECK-NEXT: .xword 0x3fe5c28f5c28f5c3 // double 0.68000000000000005 -; CHECK-NEXT: .section .rodata.cst16.hot,"aM",@progbits,16 +; CHECK-NEXT: .section .rodata.cst16.hot.,"aM",@progbits,16 ; CHECK-NEXT: .p2align ; CHECK-NEXT: .LCPI2_1: ; CHECK-NEXT: .word 0 // 0x0 diff --git a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll index a9618fd..05ecc9e 100644 --- a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll +++ b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll @@ -131,18 +131,83 @@ define <4 x i64> @interleave2_v4i64(<2 x i64> %vec0, <2 x i64> %vec1) { ret <4 x i64> %retval } +define <4 x i16> @interleave2_same_const_splat_v4i16() { +; CHECK-SD-LABEL: interleave2_same_const_splat_v4i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: movi v0.4h, #3 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: 
interleave2_same_const_splat_v4i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #3 // =0x3 +; CHECK-GI-NEXT: fmov s0, w8 +; CHECK-GI-NEXT: mov v0.h[1], w8 +; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v0.4h +; CHECK-GI-NEXT: ret + %retval = call <4 x i16> @llvm.vector.interleave2.v4i16(<2 x i16> splat(i16 3), <2 x i16> splat(i16 3)) + ret <4 x i16> %retval +} + +define <4 x i16> @interleave2_diff_const_splat_v4i16() { +; CHECK-SD-LABEL: interleave2_diff_const_splat_v4i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: adrp x8, .LCPI11_0 +; CHECK-SD-NEXT: ldr d0, [x8, :lo12:.LCPI11_0] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: interleave2_diff_const_splat_v4i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #3 // =0x3 +; CHECK-GI-NEXT: mov w9, #4 // =0x4 +; CHECK-GI-NEXT: fmov s0, w8 +; CHECK-GI-NEXT: fmov s1, w9 +; CHECK-GI-NEXT: mov v0.h[1], w8 +; CHECK-GI-NEXT: mov v1.h[1], w9 +; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v1.4h +; CHECK-GI-NEXT: ret + %retval = call <4 x i16> @llvm.vector.interleave2.v4i16(<2 x i16> splat(i16 3), <2 x i16> splat(i16 4)) + ret <4 x i16> %retval +} -; Float declarations -declare <4 x half> @llvm.vector.interleave2.v4f16(<2 x half>, <2 x half>) -declare <8 x half> @llvm.vector.interleave2.v8f16(<4 x half>, <4 x half>) -declare <16 x half> @llvm.vector.interleave2.v16f16(<8 x half>, <8 x half>) -declare <4 x float> @llvm.vector.interleave2.v4f32(<2 x float>, <2 x float>) -declare <8 x float> @llvm.vector.interleave2.v8f32(<4 x float>, <4 x float>) -declare <4 x double> @llvm.vector.interleave2.v4f64(<2 x double>, <2 x double>) - -; Integer declarations -declare <32 x i8> @llvm.vector.interleave2.v32i8(<16 x i8>, <16 x i8>) -declare <16 x i16> @llvm.vector.interleave2.v16i16(<8 x i16>, <8 x i16>) -declare <8 x i32> @llvm.vector.interleave2.v8i32(<4 x i32>, <4 x i32>) -declare <4 x i64> @llvm.vector.interleave2.v4i64(<2 x i64>, <2 x i64>) +define <4 x i16> @interleave2_same_nonconst_splat_v4i16(i16 %a) { +; CHECK-SD-LABEL: interleave2_same_nonconst_splat_v4i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: dup v0.4h, w0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: interleave2_same_nonconst_splat_v4i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: dup v0.4h, w0 +; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v0.4h +; CHECK-GI-NEXT: ret + %ins = insertelement <2 x i16> poison, i16 %a, i32 0 + %splat = shufflevector <2 x i16> %ins, <2 x i16> poison, <2 x i32> <i32 0, i32 0> + %retval = call <4 x i16> @llvm.vector.interleave2.v4i16(<2 x i16> %splat, <2 x i16> %splat) + ret <4 x i16> %retval +} + +define <4 x i16> @interleave2_diff_nonconst_splat_v4i16(i16 %a, i16 %b) { +; CHECK-SD-LABEL: interleave2_diff_nonconst_splat_v4i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: fmov s0, w0 +; CHECK-SD-NEXT: mov v0.h[1], w0 +; CHECK-SD-NEXT: mov v0.h[2], w1 +; CHECK-SD-NEXT: mov v0.h[3], w1 +; CHECK-SD-NEXT: rev32 v1.4h, v0.4h +; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: interleave2_diff_nonconst_splat_v4i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: dup v0.4h, w0 +; CHECK-GI-NEXT: dup v1.4h, w1 +; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v1.4h +; CHECK-GI-NEXT: ret + %ins1 = insertelement <2 x i16> poison, i16 %a, i32 0 + %splat1 = shufflevector <2 x i16> %ins1, <2 x i16> poison, <2 x i32> <i32 0, i32 0> + %ins2 = insertelement <2 x i16> poison, i16 %b, i32 0 + %splat2 = shufflevector <2 x i16> %ins2, <2 x i16> poison, <2 x i32> <i32 0, i32 0> + %retval = call <4 x i16> @llvm.vector.interleave2.v4i16(<2 x i16> %splat1, <2 x i16> %splat2) + ret <4 x i16> %retval +} diff --git 
a/llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll b/llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll index 0f208f8..374def5 100644 --- a/llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll +++ b/llvm/test/CodeGen/AArch64/implicit-def-subreg-to-reg-regression.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 -; RUN: llc -aarch64-min-jump-table-entries=4 -mtriple=arm64-apple-ios < %s | FileCheck %s +; RUN: llc -aarch64-min-jump-table-entries=4 -mtriple=arm64-apple-ios -enable-subreg-liveness=false < %s | sed -e "/; kill: /d" | FileCheck %s +; RUN: llc -aarch64-min-jump-table-entries=4 -mtriple=arm64-apple-ios -enable-subreg-liveness=true < %s | FileCheck %s ; Check there's no assert in spilling from implicit-def operands on an ; IMPLICIT_DEF. @@ -92,7 +93,6 @@ define void @widget(i32 %arg, i32 %arg1, ptr %arg2, ptr %arg3, ptr %arg4, i32 %a ; CHECK-NEXT: ldr x8, [sp, #40] ; 8-byte Folded Reload ; CHECK-NEXT: mov x0, xzr ; CHECK-NEXT: mov x1, xzr -; CHECK-NEXT: ; kill: def $w8 killed $w8 killed $x8 def $x8 ; CHECK-NEXT: str x8, [sp] ; CHECK-NEXT: bl _fprintf ; CHECK-NEXT: brk #0x1 diff --git a/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll b/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll index c4a027c..381904f 100644 --- a/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll +++ b/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll @@ -25,77 +25,58 @@ define void @test_interp(ptr %frame, ptr %dst) { ; CHECK-NEXT: adrp x21, _opcode.targets@PAGE ; CHECK-NEXT: Lloh1: ; CHECK-NEXT: add x21, x21, _opcode.targets@PAGEOFF -; CHECK-NEXT: mov x22, xzr +; CHECK-NEXT: mov x24, xzr ; CHECK-NEXT: add x8, x21, xzr, lsl #3 ; CHECK-NEXT: mov x19, x1 ; CHECK-NEXT: mov x20, x0 -; CHECK-NEXT: add x23, x22, #1 +; CHECK-NEXT: mov x23, xzr +; CHECK-NEXT: mov w22, #1 ; =0x1 +; CHECK-NEXT: add x24, x24, #1 ; CHECK-NEXT: br x8 ; CHECK-NEXT: Ltmp0: ; Block address taken ; CHECK-NEXT: LBB0_1: ; %loop.header ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: add x8, x21, x23, lsl #3 +; CHECK-NEXT: add x8, x21, x24, lsl #3 ; CHECK-NEXT: mov x20, xzr -; CHECK-NEXT: mov x22, xzr -; CHECK-NEXT: add x23, x23, #1 +; CHECK-NEXT: mov x23, xzr +; CHECK-NEXT: add x24, x24, #1 ; CHECK-NEXT: br x8 ; CHECK-NEXT: Ltmp1: ; Block address taken ; CHECK-NEXT: LBB0_2: ; %op1.bb -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: str xzr, [x19] -; CHECK-NEXT: mov w8, #1 ; =0x1 +; CHECK-NEXT: Ltmp2: ; Block address taken +; CHECK-NEXT: LBB0_3: ; %op6.bb +; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: ldr x0, [x20, #-8]! 
-; CHECK-NEXT: ldr x9, [x0, #8] -; CHECK-NEXT: str x8, [x0] -; CHECK-NEXT: ldr x8, [x9, #48] +; CHECK-NEXT: ldr x8, [x0, #8] +; CHECK-NEXT: str x22, [x0] +; CHECK-NEXT: ldr x8, [x8, #48] ; CHECK-NEXT: blr x8 -; CHECK-NEXT: add x8, x21, x23, lsl #3 -; CHECK-NEXT: add x23, x23, #1 +; CHECK-NEXT: add x8, x21, x24, lsl #3 +; CHECK-NEXT: add x24, x24, #1 ; CHECK-NEXT: br x8 -; CHECK-NEXT: Ltmp2: ; Block address taken -; CHECK-NEXT: LBB0_3: ; %op2.bb +; CHECK-NEXT: Ltmp3: ; Block address taken +; CHECK-NEXT: LBB0_4: ; %op2.bb ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: add x8, x21, x23, lsl #3 +; CHECK-NEXT: add x8, x21, x24, lsl #3 ; CHECK-NEXT: mov x20, xzr -; CHECK-NEXT: add x23, x23, #1 -; CHECK-NEXT: str x22, [x19] -; CHECK-NEXT: mov x22, xzr +; CHECK-NEXT: str x23, [x19] +; CHECK-NEXT: mov x23, xzr +; CHECK-NEXT: add x24, x24, #1 ; CHECK-NEXT: br x8 -; CHECK-NEXT: Ltmp3: ; Block address taken -; CHECK-NEXT: LBB0_4: ; %op4.bb -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: str x22, [x19] -; CHECK-NEXT: add x10, x21, x23, lsl #3 -; CHECK-NEXT: add x23, x23, #1 -; CHECK-NEXT: ldur x8, [x22, #12] -; CHECK-NEXT: ldur x9, [x20, #-8] -; CHECK-NEXT: add x22, x22, #20 -; CHECK-NEXT: stp x8, x9, [x20, #-8] -; CHECK-NEXT: add x20, x20, #8 -; CHECK-NEXT: br x10 ; CHECK-NEXT: Ltmp4: ; Block address taken -; CHECK-NEXT: LBB0_5: ; %op5.bb +; CHECK-NEXT: LBB0_5: ; %op4.bb +; CHECK-NEXT: Ltmp5: ; Block address taken +; CHECK-NEXT: LBB0_6: ; %op5.bb ; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: str x22, [x19] -; CHECK-NEXT: add x10, x21, x23, lsl #3 -; CHECK-NEXT: add x23, x23, #1 -; CHECK-NEXT: ldur x8, [x22, #12] +; CHECK-NEXT: str x23, [x19] +; CHECK-NEXT: ldur x8, [x23, #12] ; CHECK-NEXT: ldur x9, [x20, #-8] -; CHECK-NEXT: add x22, x22, #20 +; CHECK-NEXT: add x23, x23, #20 ; CHECK-NEXT: stp x8, x9, [x20, #-8] +; CHECK-NEXT: add x8, x21, x24, lsl #3 ; CHECK-NEXT: add x20, x20, #8 -; CHECK-NEXT: br x10 -; CHECK-NEXT: Ltmp5: ; Block address taken -; CHECK-NEXT: LBB0_6: ; %op6.bb -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: ldr x0, [x20, #-8]! 
-; CHECK-NEXT: mov w8, #1 ; =0x1 -; CHECK-NEXT: ldr x9, [x0, #8] -; CHECK-NEXT: str x8, [x0] -; CHECK-NEXT: ldr x8, [x9, #48] -; CHECK-NEXT: blr x8 -; CHECK-NEXT: add x8, x21, x23, lsl #3 -; CHECK-NEXT: add x23, x23, #1 +; CHECK-NEXT: add x24, x24, #1 ; CHECK-NEXT: br x8 ; CHECK-NEXT: .loh AdrpAdd Lloh0, Lloh1 entry: diff --git a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll index 9912c7a..81f13b8 100644 --- a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll +++ b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s +; RUN: llc -mtriple=aarch64-none-elf < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple=aarch64-none-elf -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI @var1_32 = global i32 0 @var2_32 = global i32 0 @@ -243,26 +244,48 @@ define void @logical_64bit() minsize { } define void @flag_setting() { -; CHECK-LABEL: flag_setting: -; CHECK: // %bb.0: -; CHECK-NEXT: adrp x8, :got:var1_64 -; CHECK-NEXT: adrp x10, :got:var2_64 -; CHECK-NEXT: ldr x8, [x8, :got_lo12:var1_64] -; CHECK-NEXT: ldr x10, [x10, :got_lo12:var2_64] -; CHECK-NEXT: ldr x9, [x8] -; CHECK-NEXT: ldr x10, [x10] -; CHECK-NEXT: tst x9, x10 -; CHECK-NEXT: b.gt .LBB2_4 -; CHECK-NEXT: // %bb.1: // %test2 -; CHECK-NEXT: tst x9, x10, lsl #63 -; CHECK-NEXT: b.lt .LBB2_4 -; CHECK-NEXT: // %bb.2: // %test3 -; CHECK-NEXT: tst x9, x10, asr #12 -; CHECK-NEXT: b.gt .LBB2_4 -; CHECK-NEXT: // %bb.3: // %other_exit -; CHECK-NEXT: str x9, [x8] -; CHECK-NEXT: .LBB2_4: // %common.ret -; CHECK-NEXT: ret +; CHECK-SD-LABEL: flag_setting: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: adrp x8, :got:var1_64 +; CHECK-SD-NEXT: adrp x10, :got:var2_64 +; CHECK-SD-NEXT: ldr x8, [x8, :got_lo12:var1_64] +; CHECK-SD-NEXT: ldr x10, [x10, :got_lo12:var2_64] +; CHECK-SD-NEXT: ldr x9, [x8] +; CHECK-SD-NEXT: ldr x10, [x10] +; CHECK-SD-NEXT: tst x9, x10 +; CHECK-SD-NEXT: b.gt .LBB2_4 +; CHECK-SD-NEXT: // %bb.1: // %test2 +; CHECK-SD-NEXT: tst x9, x10, lsl #63 +; CHECK-SD-NEXT: b.lt .LBB2_4 +; CHECK-SD-NEXT: // %bb.2: // %test3 +; CHECK-SD-NEXT: tst x9, x10, asr #12 +; CHECK-SD-NEXT: b.gt .LBB2_4 +; CHECK-SD-NEXT: // %bb.3: // %other_exit +; CHECK-SD-NEXT: str x9, [x8] +; CHECK-SD-NEXT: .LBB2_4: // %common.ret +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: flag_setting: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: adrp x8, :got:var1_64 +; CHECK-GI-NEXT: adrp x10, :got:var2_64 +; CHECK-GI-NEXT: ldr x8, [x8, :got_lo12:var1_64] +; CHECK-GI-NEXT: ldr x10, [x10, :got_lo12:var2_64] +; CHECK-GI-NEXT: ldr x9, [x8] +; CHECK-GI-NEXT: ldr x10, [x10] +; CHECK-GI-NEXT: tst x9, x10 +; CHECK-GI-NEXT: b.gt .LBB2_4 +; CHECK-GI-NEXT: // %bb.1: // %test2 +; CHECK-GI-NEXT: tst x9, x10, lsl #63 +; CHECK-GI-NEXT: b.lt .LBB2_4 +; CHECK-GI-NEXT: // %bb.2: // %test3 +; CHECK-GI-NEXT: asr x10, x10, #12 +; CHECK-GI-NEXT: tst x10, x9 +; CHECK-GI-NEXT: b.gt .LBB2_4 +; CHECK-GI-NEXT: // %bb.3: // %other_exit +; CHECK-GI-NEXT: str x9, [x8] +; CHECK-GI-NEXT: .LBB2_4: // %common.ret +; CHECK-GI-NEXT: ret %val1 = load i64, ptr @var1_64 %val2 = load i64, ptr @var2_64 diff --git a/llvm/test/CodeGen/AArch64/midpoint-int.ll b/llvm/test/CodeGen/AArch64/midpoint-int.ll index bbdce7c..15c1dff 100644 --- a/llvm/test/CodeGen/AArch64/midpoint-int.ll +++ b/llvm/test/CodeGen/AArch64/midpoint-int.ll @@ -13,10 +13,9 @@ define i32 
@scalar_i32_signed_reg_reg(i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: scalar_i32_signed_reg_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w9, w1, w0 -; CHECK-NEXT: subs w10, w0, w1 +; CHECK-NEXT: subs w9, w0, w1 ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: csel w9, w10, w9, gt +; CHECK-NEXT: cneg w9, w9, le ; CHECK-NEXT: cneg w8, w8, le ; CHECK-NEXT: lsr w9, w9, #1 ; CHECK-NEXT: madd w0, w9, w8, w0 @@ -35,10 +34,9 @@ define i32 @scalar_i32_signed_reg_reg(i32 %a1, i32 %a2) nounwind { define i32 @scalar_i32_unsigned_reg_reg(i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: scalar_i32_unsigned_reg_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: sub w9, w1, w0 -; CHECK-NEXT: subs w10, w0, w1 +; CHECK-NEXT: subs w9, w0, w1 ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: csel w9, w10, w9, hi +; CHECK-NEXT: cneg w9, w9, ls ; CHECK-NEXT: cneg w8, w8, ls ; CHECK-NEXT: lsr w9, w9, #1 ; CHECK-NEXT: madd w0, w9, w8, w0 @@ -61,11 +59,9 @@ define i32 @scalar_i32_signed_mem_reg(ptr %a1_addr, i32 %a2) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr w9, [x0] ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: cmp w9, w1 -; CHECK-NEXT: sub w10, w1, w9 +; CHECK-NEXT: subs w10, w9, w1 +; CHECK-NEXT: cneg w10, w10, le ; CHECK-NEXT: cneg w8, w8, le -; CHECK-NEXT: subs w11, w9, w1 -; CHECK-NEXT: csel w10, w11, w10, gt ; CHECK-NEXT: lsr w10, w10, #1 ; CHECK-NEXT: madd w0, w10, w8, w9 ; CHECK-NEXT: ret @@ -86,11 +82,9 @@ define i32 @scalar_i32_signed_reg_mem(i32 %a1, ptr %a2_addr) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr w9, [x1] ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: cmp w0, w9 -; CHECK-NEXT: sub w10, w9, w0 -; CHECK-NEXT: cneg w8, w8, le ; CHECK-NEXT: subs w9, w0, w9 -; CHECK-NEXT: csel w9, w9, w10, gt +; CHECK-NEXT: cneg w9, w9, le +; CHECK-NEXT: cneg w8, w8, le ; CHECK-NEXT: lsr w9, w9, #1 ; CHECK-NEXT: madd w0, w9, w8, w0 ; CHECK-NEXT: ret @@ -112,11 +106,9 @@ define i32 @scalar_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind { ; CHECK-NEXT: ldr w9, [x0] ; CHECK-NEXT: ldr w10, [x1] ; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: cmp w9, w10 -; CHECK-NEXT: sub w11, w10, w9 -; CHECK-NEXT: cneg w8, w8, le ; CHECK-NEXT: subs w10, w9, w10 -; CHECK-NEXT: csel w10, w10, w11, gt +; CHECK-NEXT: cneg w10, w10, le +; CHECK-NEXT: cneg w8, w8, le ; CHECK-NEXT: lsr w10, w10, #1 ; CHECK-NEXT: madd w0, w10, w8, w9 ; CHECK-NEXT: ret @@ -142,10 +134,9 @@ define i32 @scalar_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind { define i64 @scalar_i64_signed_reg_reg(i64 %a1, i64 %a2) nounwind { ; CHECK-LABEL: scalar_i64_signed_reg_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x9, x1, x0 -; CHECK-NEXT: subs x10, x0, x1 +; CHECK-NEXT: subs x9, x0, x1 ; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff -; CHECK-NEXT: csel x9, x10, x9, gt +; CHECK-NEXT: cneg x9, x9, le ; CHECK-NEXT: cneg x8, x8, le ; CHECK-NEXT: lsr x9, x9, #1 ; CHECK-NEXT: madd x0, x9, x8, x0 @@ -164,10 +155,9 @@ define i64 @scalar_i64_signed_reg_reg(i64 %a1, i64 %a2) nounwind { define i64 @scalar_i64_unsigned_reg_reg(i64 %a1, i64 %a2) nounwind { ; CHECK-LABEL: scalar_i64_unsigned_reg_reg: ; CHECK: // %bb.0: -; CHECK-NEXT: sub x9, x1, x0 -; CHECK-NEXT: subs x10, x0, x1 +; CHECK-NEXT: subs x9, x0, x1 ; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff -; CHECK-NEXT: csel x9, x10, x9, hi +; CHECK-NEXT: cneg x9, x9, ls ; CHECK-NEXT: cneg x8, x8, ls ; CHECK-NEXT: lsr x9, x9, #1 ; CHECK-NEXT: madd x0, x9, x8, x0 @@ -190,11 +180,9 @@ define i64 @scalar_i64_signed_mem_reg(ptr %a1_addr, i64 %a2) nounwind { ; CHECK: // %bb.0: ; 
CHECK-NEXT: ldr x9, [x0] ; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff -; CHECK-NEXT: cmp x9, x1 -; CHECK-NEXT: sub x10, x1, x9 +; CHECK-NEXT: subs x10, x9, x1 +; CHECK-NEXT: cneg x10, x10, le ; CHECK-NEXT: cneg x8, x8, le -; CHECK-NEXT: subs x11, x9, x1 -; CHECK-NEXT: csel x10, x11, x10, gt ; CHECK-NEXT: lsr x10, x10, #1 ; CHECK-NEXT: madd x0, x10, x8, x9 ; CHECK-NEXT: ret @@ -215,11 +203,9 @@ define i64 @scalar_i64_signed_reg_mem(i64 %a1, ptr %a2_addr) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: ldr x9, [x1] ; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff -; CHECK-NEXT: cmp x0, x9 -; CHECK-NEXT: sub x10, x9, x0 -; CHECK-NEXT: cneg x8, x8, le ; CHECK-NEXT: subs x9, x0, x9 -; CHECK-NEXT: csel x9, x9, x10, gt +; CHECK-NEXT: cneg x9, x9, le +; CHECK-NEXT: cneg x8, x8, le ; CHECK-NEXT: lsr x9, x9, #1 ; CHECK-NEXT: madd x0, x9, x8, x0 ; CHECK-NEXT: ret @@ -241,11 +227,9 @@ define i64 @scalar_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind { ; CHECK-NEXT: ldr x9, [x0] ; CHECK-NEXT: ldr x10, [x1] ; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff -; CHECK-NEXT: cmp x9, x10 -; CHECK-NEXT: sub x11, x10, x9 -; CHECK-NEXT: cneg x8, x8, le ; CHECK-NEXT: subs x10, x9, x10 -; CHECK-NEXT: csel x10, x10, x11, gt +; CHECK-NEXT: cneg x10, x10, le +; CHECK-NEXT: cneg x8, x8, le ; CHECK-NEXT: lsr x10, x10, #1 ; CHECK-NEXT: madd x0, x10, x8, x9 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/neg-abs.ll b/llvm/test/CodeGen/AArch64/neg-abs.ll index 9be0d1a..35cafe5 100644 --- a/llvm/test/CodeGen/AArch64/neg-abs.ll +++ b/llvm/test/CodeGen/AArch64/neg-abs.ll @@ -1,15 +1,22 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -verify-machineinstrs \ -; RUN: -mtriple=aarch64-unknown-unknown < %s | FileCheck %s +; RUN: llc -mtriple=aarch64-none-elf < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple=aarch64-none-elf -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI declare i64 @llvm.abs.i64(i64, i1 immarg) define i64 @neg_abs64(i64 %x) { -; CHECK-LABEL: neg_abs64: -; CHECK: // %bb.0: -; CHECK-NEXT: cmp x0, #0 -; CHECK-NEXT: cneg x0, x0, pl -; CHECK-NEXT: ret +; CHECK-SD-LABEL: neg_abs64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmp x0, #0 +; CHECK-SD-NEXT: cneg x0, x0, pl +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: neg_abs64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: cmp x0, #0 +; CHECK-GI-NEXT: cneg x8, x0, le +; CHECK-GI-NEXT: neg x0, x8 +; CHECK-GI-NEXT: ret %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true) %neg = sub nsw i64 0, %abs ret i64 %neg @@ -18,11 +25,18 @@ define i64 @neg_abs64(i64 %x) { declare i32 @llvm.abs.i32(i32, i1 immarg) define i32 @neg_abs32(i32 %x) { -; CHECK-LABEL: neg_abs32: -; CHECK: // %bb.0: -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: cneg w0, w0, pl -; CHECK-NEXT: ret +; CHECK-SD-LABEL: neg_abs32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: cneg w0, w0, pl +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: neg_abs32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: cneg w8, w0, le +; CHECK-GI-NEXT: neg w0, w8 +; CHECK-GI-NEXT: ret %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true) %neg = sub nsw i32 0, %abs ret i32 %neg @@ -31,12 +45,20 @@ define i32 @neg_abs32(i32 %x) { declare i16 @llvm.abs.i16(i16, i1 immarg) define i16 @neg_abs16(i16 %x) { -; CHECK-LABEL: neg_abs16: -; CHECK: // %bb.0: -; CHECK-NEXT: sbfx w8, w0, #15, #1 -; CHECK-NEXT: eor w9, w0, w8 -; CHECK-NEXT: sub w0, w8, w9 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: neg_abs16: +; CHECK-SD: // %bb.0: 
+; CHECK-SD-NEXT: sbfx w8, w0, #15, #1 +; CHECK-SD-NEXT: eor w9, w0, w8 +; CHECK-SD-NEXT: sub w0, w8, w9 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: neg_abs16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sxth w8, w0 +; CHECK-GI-NEXT: cmp w8, #0 +; CHECK-GI-NEXT: cneg w8, w0, le +; CHECK-GI-NEXT: neg w0, w8 +; CHECK-GI-NEXT: ret %abs = tail call i16 @llvm.abs.i16(i16 %x, i1 true) %neg = sub nsw i16 0, %abs ret i16 %neg @@ -46,14 +68,25 @@ define i16 @neg_abs16(i16 %x) { declare i128 @llvm.abs.i128(i128, i1 immarg) define i128 @neg_abs128(i128 %x) { -; CHECK-LABEL: neg_abs128: -; CHECK: // %bb.0: -; CHECK-NEXT: asr x8, x1, #63 -; CHECK-NEXT: eor x9, x0, x8 -; CHECK-NEXT: eor x10, x1, x8 -; CHECK-NEXT: subs x0, x8, x9 -; CHECK-NEXT: sbc x1, x8, x10 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: neg_abs128: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: asr x8, x1, #63 +; CHECK-SD-NEXT: eor x9, x0, x8 +; CHECK-SD-NEXT: eor x10, x1, x8 +; CHECK-SD-NEXT: subs x0, x8, x9 +; CHECK-SD-NEXT: sbc x1, x8, x10 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: neg_abs128: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: asr x8, x1, #63 +; CHECK-GI-NEXT: adds x9, x0, x8 +; CHECK-GI-NEXT: adc x10, x1, x8 +; CHECK-GI-NEXT: eor x9, x9, x8 +; CHECK-GI-NEXT: eor x8, x10, x8 +; CHECK-GI-NEXT: negs x0, x9 +; CHECK-GI-NEXT: ngc x1, x8 +; CHECK-GI-NEXT: ret %abs = tail call i128 @llvm.abs.i128(i128 %x, i1 true) %neg = sub nsw i128 0, %abs ret i128 %neg @@ -62,46 +95,76 @@ define i128 @neg_abs128(i128 %x) { define i64 @abs64(i64 %x) { -; CHECK-LABEL: abs64: -; CHECK: // %bb.0: -; CHECK-NEXT: cmp x0, #0 -; CHECK-NEXT: cneg x0, x0, mi -; CHECK-NEXT: ret +; CHECK-SD-LABEL: abs64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmp x0, #0 +; CHECK-SD-NEXT: cneg x0, x0, mi +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: abs64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: cmp x0, #0 +; CHECK-GI-NEXT: cneg x0, x0, le +; CHECK-GI-NEXT: ret %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true) ret i64 %abs } define i32 @abs32(i32 %x) { -; CHECK-LABEL: abs32: -; CHECK: // %bb.0: -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: cneg w0, w0, mi -; CHECK-NEXT: ret +; CHECK-SD-LABEL: abs32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: cmp w0, #0 +; CHECK-SD-NEXT: cneg w0, w0, mi +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: abs32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: cneg w0, w0, le +; CHECK-GI-NEXT: ret %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true) ret i32 %abs } define i16 @abs16(i16 %x) { -; CHECK-LABEL: abs16: -; CHECK: // %bb.0: -; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: cneg w0, w8, mi -; CHECK-NEXT: ret +; CHECK-SD-LABEL: abs16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sxth w8, w0 +; CHECK-SD-NEXT: cmp w8, #0 +; CHECK-SD-NEXT: cneg w0, w8, mi +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: abs16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sxth w8, w0 +; CHECK-GI-NEXT: cmp w8, #0 +; CHECK-GI-NEXT: cneg w0, w0, le +; CHECK-GI-NEXT: ret %abs = tail call i16 @llvm.abs.i16(i16 %x, i1 true) ret i16 %abs } define i128 @abs128(i128 %x) { -; CHECK-LABEL: abs128: -; CHECK: // %bb.0: -; CHECK-NEXT: asr x8, x1, #63 -; CHECK-NEXT: eor x9, x0, x8 -; CHECK-NEXT: eor x10, x1, x8 -; CHECK-NEXT: subs x0, x9, x8 -; CHECK-NEXT: sbc x1, x10, x8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: abs128: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: asr x8, x1, #63 +; CHECK-SD-NEXT: eor x9, x0, x8 +; CHECK-SD-NEXT: eor x10, x1, x8 +; CHECK-SD-NEXT: subs x0, x9, x8 +; CHECK-SD-NEXT: sbc x1, x10, x8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: abs128: +; CHECK-GI: // 
%bb.0: +; CHECK-GI-NEXT: asr x8, x1, #63 +; CHECK-GI-NEXT: adds x9, x0, x8 +; CHECK-GI-NEXT: adc x10, x1, x8 +; CHECK-GI-NEXT: eor x0, x9, x8 +; CHECK-GI-NEXT: eor x1, x10, x8 +; CHECK-GI-NEXT: ret %abs = tail call i128 @llvm.abs.i128(i128 %x, i1 true) ret i128 %abs } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/CodeGen/AArch64/neg-selects.ll b/llvm/test/CodeGen/AArch64/neg-selects.ll index 4ef1633..b643ee7 100644 --- a/llvm/test/CodeGen/AArch64/neg-selects.ll +++ b/llvm/test/CodeGen/AArch64/neg-selects.ll @@ -1,12 +1,22 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=aarch64-none-elf %s -o - | FileCheck %s +; RUN: llc -mtriple=aarch64-none-elf < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple=aarch64-none-elf -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI define i32 @neg_select_neg(i32 %a, i32 %b, i1 %bb) { -; CHECK-LABEL: neg_select_neg: -; CHECK: // %bb.0: -; CHECK-NEXT: tst w2, #0x1 -; CHECK-NEXT: csel w0, w0, w1, ne -; CHECK-NEXT: ret +; CHECK-SD-LABEL: neg_select_neg: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: tst w2, #0x1 +; CHECK-SD-NEXT: csel w0, w0, w1, ne +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: neg_select_neg: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: and w8, w2, #0x1 +; CHECK-GI-NEXT: neg w9, w0 +; CHECK-GI-NEXT: tst w8, #0x1 +; CHECK-GI-NEXT: csneg w8, w9, w1, ne +; CHECK-GI-NEXT: neg w0, w8 +; CHECK-GI-NEXT: ret %nega = sub i32 0, %a %negb = sub i32 0, %b %sel = select i1 %bb, i32 %nega, i32 %negb @@ -15,11 +25,20 @@ define i32 @neg_select_neg(i32 %a, i32 %b, i1 %bb) { } define i32 @negneg_select_nega(i32 %a, i32 %b, i1 %bb) { -; CHECK-LABEL: negneg_select_nega: -; CHECK: // %bb.0: -; CHECK-NEXT: tst w2, #0x1 -; CHECK-NEXT: csneg w0, w1, w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: negneg_select_nega: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: tst w2, #0x1 +; CHECK-SD-NEXT: csneg w0, w1, w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: negneg_select_nega: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: and w8, w2, #0x1 +; CHECK-GI-NEXT: tst w8, #0x1 +; CHECK-GI-NEXT: csneg w8, w1, w0, eq +; CHECK-GI-NEXT: neg w8, w8 +; CHECK-GI-NEXT: neg w0, w8 +; CHECK-GI-NEXT: ret %nega = sub i32 0, %a %sel = select i1 %bb, i32 %nega, i32 %b %nsel = sub i32 0, %sel @@ -28,11 +47,19 @@ define i32 @negneg_select_nega(i32 %a, i32 %b, i1 %bb) { } define i32 @neg_select_nega(i32 %a, i32 %b, i1 %bb) { -; CHECK-LABEL: neg_select_nega: -; CHECK: // %bb.0: -; CHECK-NEXT: tst w2, #0x1 -; CHECK-NEXT: csneg w0, w0, w1, ne -; CHECK-NEXT: ret +; CHECK-SD-LABEL: neg_select_nega: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: tst w2, #0x1 +; CHECK-SD-NEXT: csneg w0, w0, w1, ne +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: neg_select_nega: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: and w8, w2, #0x1 +; CHECK-GI-NEXT: tst w8, #0x1 +; CHECK-GI-NEXT: csneg w8, w1, w0, eq +; CHECK-GI-NEXT: neg w0, w8 +; CHECK-GI-NEXT: ret %nega = sub i32 0, %a %sel = select i1 %bb, i32 %nega, i32 %b %res = sub i32 0, %sel @@ -40,11 +67,19 @@ define i32 @neg_select_nega(i32 %a, i32 %b, i1 %bb) { } define i32 @neg_select_negb(i32 %a, i32 %b, i1 %bb) { -; CHECK-LABEL: neg_select_negb: -; CHECK: // %bb.0: -; CHECK-NEXT: tst w2, #0x1 -; CHECK-NEXT: csneg w0, w1, w0, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: neg_select_negb: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: tst w2, #0x1 +; CHECK-SD-NEXT: csneg w0, w1, w0, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: 
neg_select_negb: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: and w8, w2, #0x1 +; CHECK-GI-NEXT: tst w8, #0x1 +; CHECK-GI-NEXT: csneg w8, w0, w1, ne +; CHECK-GI-NEXT: neg w0, w8 +; CHECK-GI-NEXT: ret %negb = sub i32 0, %b %sel = select i1 %bb, i32 %a, i32 %negb %res = sub i32 0, %sel @@ -52,28 +87,47 @@ define i32 @neg_select_negb(i32 %a, i32 %b, i1 %bb) { } define i32 @neg_select_ab(i32 %a, i32 %b, i1 %bb) { -; CHECK-LABEL: neg_select_ab: -; CHECK: // %bb.0: -; CHECK-NEXT: tst w2, #0x1 -; CHECK-NEXT: csel w8, w0, w1, ne -; CHECK-NEXT: neg w0, w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: neg_select_ab: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: tst w2, #0x1 +; CHECK-SD-NEXT: csel w8, w0, w1, ne +; CHECK-SD-NEXT: neg w0, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: neg_select_ab: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: and w8, w2, #0x1 +; CHECK-GI-NEXT: tst w8, #0x1 +; CHECK-GI-NEXT: csel w8, w0, w1, ne +; CHECK-GI-NEXT: neg w0, w8 +; CHECK-GI-NEXT: ret %sel = select i1 %bb, i32 %a, i32 %b %res = sub i32 0, %sel ret i32 %res } define i32 @neg_select_nega_with_use(i32 %a, i32 %b, i1 %bb) { -; CHECK-LABEL: neg_select_nega_with_use: -; CHECK: // %bb.0: -; CHECK-NEXT: tst w2, #0x1 -; CHECK-NEXT: neg w8, w0 -; CHECK-NEXT: csneg w9, w1, w0, eq -; CHECK-NEXT: sub w0, w8, w9 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: neg_select_nega_with_use: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: tst w2, #0x1 +; CHECK-SD-NEXT: neg w8, w0 +; CHECK-SD-NEXT: csneg w9, w1, w0, eq +; CHECK-SD-NEXT: sub w0, w8, w9 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: neg_select_nega_with_use: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: and w8, w2, #0x1 +; CHECK-GI-NEXT: tst w8, #0x1 +; CHECK-GI-NEXT: neg w8, w0 +; CHECK-GI-NEXT: csneg w9, w1, w0, eq +; CHECK-GI-NEXT: sub w0, w8, w9 +; CHECK-GI-NEXT: ret %nega = sub i32 0, %a %sel = select i1 %bb, i32 %nega, i32 %b %nsel = sub i32 0, %sel %res = add i32 %nsel, %nega ret i32 %res } +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/CodeGen/AArch64/neon-dot-product.ll b/llvm/test/CodeGen/AArch64/neon-dot-product.ll index cf09a46..584caa30 100644 --- a/llvm/test/CodeGen/AArch64/neon-dot-product.ll +++ b/llvm/test/CodeGen/AArch64/neon-dot-product.ll @@ -1,13 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+dotprod < %s | FileCheck %s -; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=cortex-a65 < %s | FileCheck %s -; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=cortex-a65ae < %s | FileCheck %s -; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=neoverse-e1 < %s | FileCheck %s -; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=neoverse-n1 < %s | FileCheck %s -; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=neoverse-n2 < %s | FileCheck %s -; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=ampere1 < %s | FileCheck %s -; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=ampere1a < %s | FileCheck %s -; RUN: llc -mtriple aarch64-none-linux-gnu -mcpu=ampere1b < %s | FileCheck %s +; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+dotprod < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+dotprod -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI declare <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32>, <8 x i8>, <8 x i8>) declare <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>) @@ -56,10 +49,17 @@ entry: define <2 x i32> @test_vdot_u32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #0 { -; CHECK-LABEL: test_vdot_u32_zero: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: udot v0.2s, v1.8b, v2.8b -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_vdot_u32_zero: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: udot v0.2s, v1.8b, v2.8b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_vdot_u32_zero: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v3.2d, #0000000000000000 +; CHECK-GI-NEXT: udot v3.2s, v1.8b, v2.8b +; CHECK-GI-NEXT: add v0.2s, v3.2s, v0.2s +; CHECK-GI-NEXT: ret entry: %vdot1.i = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> %b, <8 x i8> %c) #2 %ret = add <2 x i32> %vdot1.i, %a @@ -67,10 +67,17 @@ entry: } define <4 x i32> @test_vdotq_u32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #0 { -; CHECK-LABEL: test_vdotq_u32_zero: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: udot v0.4s, v1.16b, v2.16b -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_vdotq_u32_zero: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: udot v0.4s, v1.16b, v2.16b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_vdotq_u32_zero: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v3.2d, #0000000000000000 +; CHECK-GI-NEXT: udot v3.4s, v1.16b, v2.16b +; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s +; CHECK-GI-NEXT: ret entry: %vdot1.i = call <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> %b, <16 x i8> %c) #2 %ret = add <4 x i32> %vdot1.i, %a @@ -78,10 +85,17 @@ entry: } define <2 x i32> @test_vdot_s32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #0 { -; CHECK-LABEL: test_vdot_s32_zero: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: sdot v0.2s, v1.8b, v2.8b -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_vdot_s32_zero: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: sdot v0.2s, v1.8b, v2.8b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_vdot_s32_zero: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v3.2d, #0000000000000000 
+; CHECK-GI-NEXT: sdot v3.2s, v1.8b, v2.8b +; CHECK-GI-NEXT: add v0.2s, v3.2s, v0.2s +; CHECK-GI-NEXT: ret entry: %vdot1.i = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> %b, <8 x i8> %c) #2 %ret = add <2 x i32> %vdot1.i, %a @@ -89,10 +103,17 @@ entry: } define <4 x i32> @test_vdotq_s32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #0 { -; CHECK-LABEL: test_vdotq_s32_zero: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: sdot v0.4s, v1.16b, v2.16b -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_vdotq_s32_zero: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: sdot v0.4s, v1.16b, v2.16b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_vdotq_s32_zero: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v3.2d, #0000000000000000 +; CHECK-GI-NEXT: sdot v3.4s, v1.16b, v2.16b +; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s +; CHECK-GI-NEXT: ret entry: %vdot1.i = call <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> %b, <16 x i8> %c) #2 %ret = add <4 x i32> %vdot1.i, %a @@ -156,11 +177,19 @@ entry: define <2 x i32> @test_vdot_lane_u32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) { -; CHECK-LABEL: test_vdot_lane_u32_zero: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: udot v0.2s, v1.8b, v2.4b[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_vdot_lane_u32_zero: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-SD-NEXT: udot v0.2s, v1.8b, v2.4b[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_vdot_lane_u32_zero: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v3.2d, #0000000000000000 +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: udot v3.2s, v1.8b, v2.4b[1] +; CHECK-GI-NEXT: add v0.2s, v3.2s, v0.2s +; CHECK-GI-NEXT: ret entry: %.cast = bitcast <8 x i8> %c to <2 x i32> %shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <2 x i32> <i32 1, i32 1> @@ -171,11 +200,19 @@ entry: } define <4 x i32> @test_vdotq_lane_u32_zero(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) { -; CHECK-LABEL: test_vdotq_lane_u32_zero: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: udot v0.4s, v1.16b, v2.4b[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_vdotq_lane_u32_zero: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-SD-NEXT: udot v0.4s, v1.16b, v2.4b[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_vdotq_lane_u32_zero: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v3.2d, #0000000000000000 +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: udot v3.4s, v1.16b, v2.4b[1] +; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s +; CHECK-GI-NEXT: ret entry: %.cast = bitcast <8 x i8> %c to <2 x i32> %shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> @@ -186,10 +223,17 @@ entry: } define <2 x i32> @test_vdot_laneq_u32_zero(<2 x i32> %a, <8 x i8> %b, <16 x i8> %c) { -; CHECK-LABEL: test_vdot_laneq_u32_zero: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: udot v0.2s, v1.8b, v2.4b[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_vdot_laneq_u32_zero: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: udot v0.2s, v1.8b, v2.4b[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_vdot_laneq_u32_zero: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v3.2d, #0000000000000000 +; CHECK-GI-NEXT: udot v3.2s, v1.8b, v2.4b[1] +; CHECK-GI-NEXT: add v0.2s, v3.2s, v0.2s +; CHECK-GI-NEXT: ret entry: 
%.cast = bitcast <16 x i8> %c to <4 x i32> %shuffle = shufflevector <4 x i32> %.cast, <4 x i32> undef, <2 x i32> <i32 1, i32 1> @@ -200,10 +244,17 @@ entry: } define <4 x i32> @test_vdotq_laneq_u32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) { -; CHECK-LABEL: test_vdotq_laneq_u32_zero: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: udot v0.4s, v1.16b, v2.4b[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_vdotq_laneq_u32_zero: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: udot v0.4s, v1.16b, v2.4b[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_vdotq_laneq_u32_zero: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v3.2d, #0000000000000000 +; CHECK-GI-NEXT: udot v3.4s, v1.16b, v2.4b[1] +; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s +; CHECK-GI-NEXT: ret entry: %.cast = bitcast <16 x i8> %c to <4 x i32> %shuffle = shufflevector <4 x i32> %.cast, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> @@ -270,11 +321,19 @@ entry: define <2 x i32> @test_vdot_lane_s32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) { -; CHECK-LABEL: test_vdot_lane_s32_zero: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: sdot v0.2s, v1.8b, v2.4b[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_vdot_lane_s32_zero: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-SD-NEXT: sdot v0.2s, v1.8b, v2.4b[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_vdot_lane_s32_zero: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v3.2d, #0000000000000000 +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: sdot v3.2s, v1.8b, v2.4b[1] +; CHECK-GI-NEXT: add v0.2s, v3.2s, v0.2s +; CHECK-GI-NEXT: ret entry: %.cast = bitcast <8 x i8> %c to <2 x i32> %shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <2 x i32> <i32 1, i32 1> @@ -285,11 +344,19 @@ entry: } define <4 x i32> @test_vdotq_lane_s32_zero(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) { -; CHECK-LABEL: test_vdotq_lane_s32_zero: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2 -; CHECK-NEXT: sdot v0.4s, v1.16b, v2.4b[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_vdotq_lane_s32_zero: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-SD-NEXT: sdot v0.4s, v1.16b, v2.4b[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_vdotq_lane_s32_zero: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v3.2d, #0000000000000000 +; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 +; CHECK-GI-NEXT: sdot v3.4s, v1.16b, v2.4b[1] +; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s +; CHECK-GI-NEXT: ret entry: %.cast = bitcast <8 x i8> %c to <2 x i32> %shuffle = shufflevector <2 x i32> %.cast, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> @@ -300,10 +367,17 @@ entry: } define <2 x i32> @test_vdot_laneq_s32_zero(<2 x i32> %a, <8 x i8> %b, <16 x i8> %c) { -; CHECK-LABEL: test_vdot_laneq_s32_zero: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: sdot v0.2s, v1.8b, v2.4b[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_vdot_laneq_s32_zero: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: sdot v0.2s, v1.8b, v2.4b[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_vdot_laneq_s32_zero: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v3.2d, #0000000000000000 +; CHECK-GI-NEXT: sdot v3.2s, v1.8b, v2.4b[1] +; CHECK-GI-NEXT: add v0.2s, v3.2s, v0.2s +; CHECK-GI-NEXT: ret entry: %.cast = bitcast <16 x i8> %c to <4 x i32> %shuffle = shufflevector <4 x i32> %.cast, <4 x i32> undef, <2 x i32> <i32 1, 
i32 1> @@ -314,10 +388,17 @@ entry: } define <4 x i32> @test_vdotq_laneq_s32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) { -; CHECK-LABEL: test_vdotq_laneq_s32_zero: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: sdot v0.4s, v1.16b, v2.4b[1] -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_vdotq_laneq_s32_zero: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: sdot v0.4s, v1.16b, v2.4b[1] +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_vdotq_laneq_s32_zero: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: movi v3.2d, #0000000000000000 +; CHECK-GI-NEXT: sdot v3.4s, v1.16b, v2.4b[1] +; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s +; CHECK-GI-NEXT: ret entry: %.cast = bitcast <16 x i8> %c to <4 x i32> %shuffle = shufflevector <4 x i32> %.cast, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> @@ -326,3 +407,6 @@ entry: %ret = add <4 x i32> %vdot1.i, %a ret <4 x i32> %ret } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK-GI: {{.*}} +; CHECK-SD: {{.*}} diff --git a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll index 4f0c408..048e988 100644 --- a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll +++ b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll @@ -28,46 +28,28 @@ define i32 @test_udot_v4i8(ptr nocapture readonly %a, ptr nocapture readonly %b, ; ; CHECK-GI-LABEL: test_udot_v4i8: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ldr w8, [x0] -; CHECK-GI-NEXT: ldr w9, [x1] +; CHECK-GI-NEXT: ldr w8, [x1] +; CHECK-GI-NEXT: ldr w9, [x0] ; CHECK-GI-NEXT: fmov s0, w8 -; CHECK-GI-NEXT: fmov s2, w9 -; CHECK-GI-NEXT: uxtb w8, w8 -; CHECK-GI-NEXT: uxtb w9, w9 -; CHECK-GI-NEXT: mov b1, v0.b[1] -; CHECK-GI-NEXT: mov b3, v0.b[2] -; CHECK-GI-NEXT: mov b5, v2.b[2] -; CHECK-GI-NEXT: mov b4, v0.b[3] -; CHECK-GI-NEXT: mov b0, v2.b[1] -; CHECK-GI-NEXT: mov b6, v2.b[3] -; CHECK-GI-NEXT: fmov s2, w9 -; CHECK-GI-NEXT: fmov w10, s1 -; CHECK-GI-NEXT: fmov w11, s3 -; CHECK-GI-NEXT: fmov s1, w8 -; CHECK-GI-NEXT: fmov w13, s5 -; CHECK-GI-NEXT: fmov w8, s4 -; CHECK-GI-NEXT: fmov w12, s0 -; CHECK-GI-NEXT: uxtb w10, w10 -; CHECK-GI-NEXT: uxtb w11, w11 -; CHECK-GI-NEXT: uxtb w13, w13 -; CHECK-GI-NEXT: uxtb w8, w8 -; CHECK-GI-NEXT: uxtb w12, w12 -; CHECK-GI-NEXT: mov v1.h[1], w10 -; CHECK-GI-NEXT: fmov w10, s6 -; CHECK-GI-NEXT: fmov s0, w11 -; CHECK-GI-NEXT: fmov s3, w13 -; CHECK-GI-NEXT: mov v2.h[1], w12 -; CHECK-GI-NEXT: uxtb w10, w10 -; CHECK-GI-NEXT: mov v0.h[1], w8 -; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0 -; CHECK-GI-NEXT: mov v3.h[1], w10 -; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0 -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-GI-NEXT: ushll v3.4s, v3.4h, #0 -; CHECK-GI-NEXT: mov v1.d[1], v0.d[0] -; CHECK-GI-NEXT: mov v2.d[1], v3.d[0] -; CHECK-GI-NEXT: mul v0.4s, v2.4s, v1.4s -; CHECK-GI-NEXT: addv s0, v0.4s +; CHECK-GI-NEXT: fmov s1, w9 +; CHECK-GI-NEXT: mov b2, v0.b[1] +; CHECK-GI-NEXT: mov v3.b[0], v0.b[0] +; CHECK-GI-NEXT: mov b4, v1.b[1] +; CHECK-GI-NEXT: mov v5.b[0], v1.b[0] +; CHECK-GI-NEXT: mov v3.b[1], v2.b[0] +; CHECK-GI-NEXT: mov b2, v0.b[2] +; CHECK-GI-NEXT: mov b0, v0.b[3] +; CHECK-GI-NEXT: mov v5.b[1], v4.b[0] +; CHECK-GI-NEXT: mov b4, v1.b[2] +; CHECK-GI-NEXT: mov b1, v1.b[3] +; CHECK-GI-NEXT: mov v3.b[2], v2.b[0] +; CHECK-GI-NEXT: mov v5.b[2], v4.b[0] +; CHECK-GI-NEXT: mov v3.b[3], v0.b[0] +; CHECK-GI-NEXT: mov v5.b[3], v1.b[0] +; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0 +; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0 +; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h +; CHECK-GI-NEXT: uaddlv s0, v0.4h ; CHECK-GI-NEXT: fmov 
w8, s0 ; CHECK-GI-NEXT: add w0, w8, w2 ; CHECK-GI-NEXT: ret @@ -128,46 +110,28 @@ define i32 @test_sdot_v4i8(ptr nocapture readonly %a, ptr nocapture readonly %b, ; ; CHECK-GI-LABEL: test_sdot_v4i8: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ldr w8, [x0] -; CHECK-GI-NEXT: ldr w9, [x1] +; CHECK-GI-NEXT: ldr w8, [x1] +; CHECK-GI-NEXT: ldr w9, [x0] ; CHECK-GI-NEXT: fmov s0, w8 -; CHECK-GI-NEXT: fmov s2, w9 -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: mov b1, v0.b[1] -; CHECK-GI-NEXT: mov b3, v0.b[2] -; CHECK-GI-NEXT: mov b5, v2.b[2] -; CHECK-GI-NEXT: mov b4, v0.b[3] -; CHECK-GI-NEXT: mov b0, v2.b[1] -; CHECK-GI-NEXT: mov b6, v2.b[3] -; CHECK-GI-NEXT: fmov s2, w9 -; CHECK-GI-NEXT: fmov w10, s1 -; CHECK-GI-NEXT: fmov w11, s3 -; CHECK-GI-NEXT: fmov s1, w8 -; CHECK-GI-NEXT: fmov w13, s5 -; CHECK-GI-NEXT: fmov w8, s4 -; CHECK-GI-NEXT: fmov w12, s0 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: mov v1.h[1], w10 -; CHECK-GI-NEXT: fmov w10, s6 -; CHECK-GI-NEXT: fmov s0, w11 -; CHECK-GI-NEXT: fmov s3, w13 -; CHECK-GI-NEXT: mov v2.h[1], w12 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: mov v0.h[1], w8 -; CHECK-GI-NEXT: sshll v1.4s, v1.4h, #0 -; CHECK-GI-NEXT: mov v3.h[1], w10 -; CHECK-GI-NEXT: sshll v2.4s, v2.4h, #0 -; CHECK-GI-NEXT: sshll v0.4s, v0.4h, #0 -; CHECK-GI-NEXT: sshll v3.4s, v3.4h, #0 -; CHECK-GI-NEXT: mov v1.d[1], v0.d[0] -; CHECK-GI-NEXT: mov v2.d[1], v3.d[0] -; CHECK-GI-NEXT: mul v0.4s, v2.4s, v1.4s -; CHECK-GI-NEXT: addv s0, v0.4s +; CHECK-GI-NEXT: fmov s1, w9 +; CHECK-GI-NEXT: mov b2, v0.b[1] +; CHECK-GI-NEXT: mov v3.b[0], v0.b[0] +; CHECK-GI-NEXT: mov b4, v1.b[1] +; CHECK-GI-NEXT: mov v5.b[0], v1.b[0] +; CHECK-GI-NEXT: mov v3.b[1], v2.b[0] +; CHECK-GI-NEXT: mov b2, v0.b[2] +; CHECK-GI-NEXT: mov b0, v0.b[3] +; CHECK-GI-NEXT: mov v5.b[1], v4.b[0] +; CHECK-GI-NEXT: mov b4, v1.b[2] +; CHECK-GI-NEXT: mov b1, v1.b[3] +; CHECK-GI-NEXT: mov v3.b[2], v2.b[0] +; CHECK-GI-NEXT: mov v5.b[2], v4.b[0] +; CHECK-GI-NEXT: mov v3.b[3], v0.b[0] +; CHECK-GI-NEXT: mov v5.b[3], v1.b[0] +; CHECK-GI-NEXT: sshll v0.8h, v3.8b, #0 +; CHECK-GI-NEXT: sshll v1.8h, v5.8b, #0 +; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h +; CHECK-GI-NEXT: saddlv s0, v0.4h ; CHECK-GI-NEXT: fmov w8, s0 ; CHECK-GI-NEXT: add w0, w8, w2 ; CHECK-GI-NEXT: ret @@ -205,22 +169,18 @@ define i32 @test_sdot_v4i8_double(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8 ; ; CHECK-GI-LABEL: test_sdot_v4i8_double: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0 -; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0 -; CHECK-GI-NEXT: ushll v3.4s, v3.4h, #0 -; CHECK-GI-NEXT: shl v0.4s, v0.4s, #24 -; CHECK-GI-NEXT: shl v1.4s, v1.4s, #24 -; CHECK-GI-NEXT: shl v2.4s, v2.4s, #24 -; CHECK-GI-NEXT: shl v3.4s, v3.4s, #24 -; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #24 -; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #24 -; CHECK-GI-NEXT: sshr v2.4s, v2.4s, #24 -; CHECK-GI-NEXT: sshr v3.4s, v3.4s, #24 -; CHECK-GI-NEXT: mul v0.4s, v0.4s, v1.4s -; CHECK-GI-NEXT: mul v1.4s, v2.4s, v3.4s -; CHECK-GI-NEXT: addv s0, v0.4s -; CHECK-GI-NEXT: addv s1, v1.4s +; CHECK-GI-NEXT: shl v0.4h, v0.4h, #8 +; CHECK-GI-NEXT: shl v1.4h, v1.4h, #8 +; CHECK-GI-NEXT: shl v2.4h, v2.4h, #8 +; CHECK-GI-NEXT: shl v3.4h, v3.4h, #8 +; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #8 +; CHECK-GI-NEXT: sshr v1.4h, v1.4h, #8 +; CHECK-GI-NEXT: sshr v2.4h, v2.4h, #8 +; CHECK-GI-NEXT: sshr v3.4h, v3.4h, #8 +; 
CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h +; CHECK-GI-NEXT: mul v1.4h, v2.4h, v3.4h +; CHECK-GI-NEXT: saddlv s0, v0.4h +; CHECK-GI-NEXT: saddlv s1, v1.4h ; CHECK-GI-NEXT: fmov w8, s0 ; CHECK-GI-NEXT: fmov w9, s1 ; CHECK-GI-NEXT: add w0, w8, w9 @@ -414,31 +374,60 @@ define i32 @test_udot_v5i8(ptr nocapture readonly %a, ptr nocapture readonly %b, ; CHECK-GI: // %bb.0: // %entry ; CHECK-GI-NEXT: ldr d0, [x0] ; CHECK-GI-NEXT: ldr d1, [x1] -; CHECK-GI-NEXT: umov w8, v1.b[4] -; CHECK-GI-NEXT: umov w9, v0.b[4] -; CHECK-GI-NEXT: umov w10, v1.b[0] -; CHECK-GI-NEXT: umov w12, v0.b[0] -; CHECK-GI-NEXT: umov w11, v1.b[1] -; CHECK-GI-NEXT: umov w13, v0.b[1] -; CHECK-GI-NEXT: mul w8, w8, w9 -; CHECK-GI-NEXT: fmov s2, w10 -; CHECK-GI-NEXT: umov w9, v1.b[2] -; CHECK-GI-NEXT: fmov s3, w12 -; CHECK-GI-NEXT: umov w10, v1.b[3] -; CHECK-GI-NEXT: fmov s4, w8 -; CHECK-GI-NEXT: mov v2.s[1], w11 -; CHECK-GI-NEXT: umov w8, v0.b[2] -; CHECK-GI-NEXT: mov v3.s[1], w13 -; CHECK-GI-NEXT: umov w11, v0.b[3] -; CHECK-GI-NEXT: mov v4.s[1], wzr -; CHECK-GI-NEXT: mov v2.s[2], w9 -; CHECK-GI-NEXT: mov v3.s[2], w8 -; CHECK-GI-NEXT: mov v4.s[2], wzr -; CHECK-GI-NEXT: mov v2.s[3], w10 -; CHECK-GI-NEXT: mov v3.s[3], w11 -; CHECK-GI-NEXT: mov v4.s[3], wzr -; CHECK-GI-NEXT: mla v4.4s, v2.4s, v3.4s -; CHECK-GI-NEXT: addv s0, v4.4s +; CHECK-GI-NEXT: mov b2, v0.b[1] +; CHECK-GI-NEXT: mov b3, v1.b[1] +; CHECK-GI-NEXT: fmov w8, s1 +; CHECK-GI-NEXT: fmov w9, s0 +; CHECK-GI-NEXT: mov b4, v1.b[2] +; CHECK-GI-NEXT: mov b5, v0.b[2] +; CHECK-GI-NEXT: mov b6, v0.b[3] +; CHECK-GI-NEXT: mov b7, v1.b[3] +; CHECK-GI-NEXT: mov b0, v0.b[4] +; CHECK-GI-NEXT: uxtb w8, w8 +; CHECK-GI-NEXT: mov b1, v1.b[4] +; CHECK-GI-NEXT: fmov w10, s3 +; CHECK-GI-NEXT: uxtb w9, w9 +; CHECK-GI-NEXT: fmov w11, s2 +; CHECK-GI-NEXT: fmov s2, w8 +; CHECK-GI-NEXT: fmov w8, s4 +; CHECK-GI-NEXT: fmov s3, w9 +; CHECK-GI-NEXT: fmov w9, s5 +; CHECK-GI-NEXT: uxtb w10, w10 +; CHECK-GI-NEXT: uxtb w11, w11 +; CHECK-GI-NEXT: uxtb w8, w8 +; CHECK-GI-NEXT: mov v2.h[1], w10 +; CHECK-GI-NEXT: mov v3.h[1], w11 +; CHECK-GI-NEXT: uxtb w9, w9 +; CHECK-GI-NEXT: mov v2.h[2], w8 +; CHECK-GI-NEXT: mov v3.h[2], w9 +; CHECK-GI-NEXT: fmov w8, s7 +; CHECK-GI-NEXT: fmov w9, s6 +; CHECK-GI-NEXT: uxtb w8, w8 +; CHECK-GI-NEXT: uxtb w9, w9 +; CHECK-GI-NEXT: mov v2.h[3], w8 +; CHECK-GI-NEXT: fmov w8, s1 +; CHECK-GI-NEXT: mov v3.h[3], w9 +; CHECK-GI-NEXT: fmov w9, s0 +; CHECK-GI-NEXT: uxtb w8, w8 +; CHECK-GI-NEXT: uxtb w9, w9 +; CHECK-GI-NEXT: mov v2.h[4], w8 +; CHECK-GI-NEXT: mov v3.h[4], w9 +; CHECK-GI-NEXT: mul v0.8h, v2.8h, v3.8h +; CHECK-GI-NEXT: umov w8, v0.h[0] +; CHECK-GI-NEXT: umov w9, v0.h[4] +; CHECK-GI-NEXT: umov w10, v0.h[1] +; CHECK-GI-NEXT: fmov s1, w8 +; CHECK-GI-NEXT: fmov s2, w9 +; CHECK-GI-NEXT: umov w8, v0.h[2] +; CHECK-GI-NEXT: umov w9, v0.h[3] +; CHECK-GI-NEXT: mov v1.s[1], w10 +; CHECK-GI-NEXT: mov v2.s[1], wzr +; CHECK-GI-NEXT: mov v1.s[2], w8 +; CHECK-GI-NEXT: mov v2.s[2], wzr +; CHECK-GI-NEXT: mov v1.s[3], w9 +; CHECK-GI-NEXT: mov v2.s[3], wzr +; CHECK-GI-NEXT: add v0.4s, v1.4s, v2.4s +; CHECK-GI-NEXT: addv s0, v0.4s ; CHECK-GI-NEXT: fmov w8, s0 ; CHECK-GI-NEXT: add w0, w8, w2 ; CHECK-GI-NEXT: ret @@ -511,31 +500,60 @@ define i32 @test_sdot_v5i8(ptr nocapture readonly %a, ptr nocapture readonly %b, ; CHECK-GI: // %bb.0: // %entry ; CHECK-GI-NEXT: ldr d0, [x0] ; CHECK-GI-NEXT: ldr d1, [x1] -; CHECK-GI-NEXT: smov w8, v1.b[4] -; CHECK-GI-NEXT: smov w9, v0.b[4] -; CHECK-GI-NEXT: smov w10, v1.b[0] -; CHECK-GI-NEXT: smov w12, v0.b[0] -; CHECK-GI-NEXT: smov w11, v1.b[1] -; CHECK-GI-NEXT: 
smov w13, v0.b[1] -; CHECK-GI-NEXT: mul w8, w8, w9 -; CHECK-GI-NEXT: fmov s2, w10 -; CHECK-GI-NEXT: smov w9, v1.b[2] -; CHECK-GI-NEXT: fmov s3, w12 -; CHECK-GI-NEXT: smov w10, v1.b[3] -; CHECK-GI-NEXT: fmov s4, w8 -; CHECK-GI-NEXT: mov v2.s[1], w11 -; CHECK-GI-NEXT: smov w8, v0.b[2] -; CHECK-GI-NEXT: mov v3.s[1], w13 -; CHECK-GI-NEXT: smov w11, v0.b[3] -; CHECK-GI-NEXT: mov v4.s[1], wzr -; CHECK-GI-NEXT: mov v2.s[2], w9 -; CHECK-GI-NEXT: mov v3.s[2], w8 -; CHECK-GI-NEXT: mov v4.s[2], wzr -; CHECK-GI-NEXT: mov v2.s[3], w10 -; CHECK-GI-NEXT: mov v3.s[3], w11 -; CHECK-GI-NEXT: mov v4.s[3], wzr -; CHECK-GI-NEXT: mla v4.4s, v2.4s, v3.4s -; CHECK-GI-NEXT: addv s0, v4.4s +; CHECK-GI-NEXT: mov b2, v0.b[1] +; CHECK-GI-NEXT: mov b3, v1.b[1] +; CHECK-GI-NEXT: fmov w8, s1 +; CHECK-GI-NEXT: fmov w9, s0 +; CHECK-GI-NEXT: mov b4, v1.b[2] +; CHECK-GI-NEXT: mov b5, v0.b[2] +; CHECK-GI-NEXT: mov b6, v0.b[3] +; CHECK-GI-NEXT: mov b7, v1.b[3] +; CHECK-GI-NEXT: mov b0, v0.b[4] +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: mov b1, v1.b[4] +; CHECK-GI-NEXT: fmov w10, s3 +; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: fmov w11, s2 +; CHECK-GI-NEXT: fmov s2, w8 +; CHECK-GI-NEXT: fmov w8, s4 +; CHECK-GI-NEXT: fmov s3, w9 +; CHECK-GI-NEXT: fmov w9, s5 +; CHECK-GI-NEXT: sxtb w10, w10 +; CHECK-GI-NEXT: sxtb w11, w11 +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: mov v2.h[1], w10 +; CHECK-GI-NEXT: mov v3.h[1], w11 +; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: mov v2.h[2], w8 +; CHECK-GI-NEXT: mov v3.h[2], w9 +; CHECK-GI-NEXT: fmov w8, s7 +; CHECK-GI-NEXT: fmov w9, s6 +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: mov v2.h[3], w8 +; CHECK-GI-NEXT: fmov w8, s1 +; CHECK-GI-NEXT: mov v3.h[3], w9 +; CHECK-GI-NEXT: fmov w9, s0 +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: mov v2.h[4], w8 +; CHECK-GI-NEXT: mov v3.h[4], w9 +; CHECK-GI-NEXT: mul v0.8h, v2.8h, v3.8h +; CHECK-GI-NEXT: smov w8, v0.h[0] +; CHECK-GI-NEXT: smov w9, v0.h[4] +; CHECK-GI-NEXT: smov w10, v0.h[1] +; CHECK-GI-NEXT: fmov s1, w8 +; CHECK-GI-NEXT: fmov s2, w9 +; CHECK-GI-NEXT: smov w8, v0.h[2] +; CHECK-GI-NEXT: smov w9, v0.h[3] +; CHECK-GI-NEXT: mov v1.s[1], w10 +; CHECK-GI-NEXT: mov v2.s[1], wzr +; CHECK-GI-NEXT: mov v1.s[2], w8 +; CHECK-GI-NEXT: mov v2.s[2], wzr +; CHECK-GI-NEXT: mov v1.s[3], w9 +; CHECK-GI-NEXT: mov v2.s[3], wzr +; CHECK-GI-NEXT: add v0.4s, v1.4s, v2.4s +; CHECK-GI-NEXT: addv s0, v0.4s ; CHECK-GI-NEXT: fmov w8, s0 ; CHECK-GI-NEXT: add w0, w8, w2 ; CHECK-GI-NEXT: ret @@ -571,59 +589,117 @@ define i32 @test_sdot_v5i8_double(<5 x i8> %a, <5 x i8> %b, <5 x i8> %c, <5 x i8 ; CHECK-GI-LABEL: test_sdot_v5i8_double: ; CHECK-GI: // %bb.0: // %entry ; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: mov b17, v0.b[1] +; CHECK-GI-NEXT: fmov w8, s0 ; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1 ; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2 ; CHECK-GI-NEXT: // kill: def $d3 killed $d3 def $q3 -; CHECK-GI-NEXT: smov w9, v1.b[0] -; CHECK-GI-NEXT: smov w10, v0.b[4] -; CHECK-GI-NEXT: smov w11, v1.b[4] -; CHECK-GI-NEXT: smov w12, v2.b[0] -; CHECK-GI-NEXT: smov w13, v2.b[4] -; CHECK-GI-NEXT: smov w14, v3.b[4] -; CHECK-GI-NEXT: smov w8, v0.b[0] -; CHECK-GI-NEXT: smov w16, v3.b[0] -; CHECK-GI-NEXT: smov w15, v0.b[1] -; CHECK-GI-NEXT: fmov s5, w9 -; CHECK-GI-NEXT: mul w9, w10, w11 -; CHECK-GI-NEXT: smov w10, v1.b[1] -; CHECK-GI-NEXT: fmov s6, w12 -; CHECK-GI-NEXT: mul w12, w13, w14 -; CHECK-GI-NEXT: smov w11, v2.b[1] -; CHECK-GI-NEXT: smov w13, v3.b[1] -; 
CHECK-GI-NEXT: fmov s4, w8 -; CHECK-GI-NEXT: fmov s7, w16 -; CHECK-GI-NEXT: fmov s16, w9 -; CHECK-GI-NEXT: smov w8, v0.b[2] -; CHECK-GI-NEXT: smov w14, v1.b[2] -; CHECK-GI-NEXT: fmov s17, w12 -; CHECK-GI-NEXT: smov w9, v3.b[2] -; CHECK-GI-NEXT: mov v5.s[1], w10 -; CHECK-GI-NEXT: mov v4.s[1], w15 -; CHECK-GI-NEXT: smov w15, v2.b[2] -; CHECK-GI-NEXT: mov v6.s[1], w11 -; CHECK-GI-NEXT: mov v16.s[1], wzr -; CHECK-GI-NEXT: mov v7.s[1], w13 -; CHECK-GI-NEXT: smov w10, v0.b[3] -; CHECK-GI-NEXT: mov v17.s[1], wzr -; CHECK-GI-NEXT: smov w11, v1.b[3] -; CHECK-GI-NEXT: smov w12, v2.b[3] -; CHECK-GI-NEXT: smov w13, v3.b[3] -; CHECK-GI-NEXT: mov v5.s[2], w14 -; CHECK-GI-NEXT: mov v4.s[2], w8 -; CHECK-GI-NEXT: mov v6.s[2], w15 -; CHECK-GI-NEXT: mov v16.s[2], wzr -; CHECK-GI-NEXT: mov v7.s[2], w9 -; CHECK-GI-NEXT: mov v17.s[2], wzr -; CHECK-GI-NEXT: mov v5.s[3], w11 -; CHECK-GI-NEXT: mov v4.s[3], w10 -; CHECK-GI-NEXT: mov v6.s[3], w12 -; CHECK-GI-NEXT: mov v16.s[3], wzr -; CHECK-GI-NEXT: mov v7.s[3], w13 -; CHECK-GI-NEXT: mov v17.s[3], wzr -; CHECK-GI-NEXT: mla v16.4s, v4.4s, v5.4s -; CHECK-GI-NEXT: mla v17.4s, v6.4s, v7.4s -; CHECK-GI-NEXT: addv s0, v16.4s -; CHECK-GI-NEXT: addv s1, v17.4s +; CHECK-GI-NEXT: fmov w11, s1 +; CHECK-GI-NEXT: mov b25, v1.b[1] +; CHECK-GI-NEXT: mov b16, v1.b[2] +; CHECK-GI-NEXT: mov b7, v1.b[3] +; CHECK-GI-NEXT: mov b5, v1.b[4] +; CHECK-GI-NEXT: mov b22, v2.b[1] +; CHECK-GI-NEXT: mov b23, v3.b[1] +; CHECK-GI-NEXT: sxtb w9, w8 +; CHECK-GI-NEXT: sxtb w11, w11 +; CHECK-GI-NEXT: mov b24, v0.b[2] +; CHECK-GI-NEXT: fmov w8, s17 +; CHECK-GI-NEXT: mov b6, v0.b[3] +; CHECK-GI-NEXT: mov b4, v0.b[4] +; CHECK-GI-NEXT: fmov s1, w9 +; CHECK-GI-NEXT: mov b18, v2.b[2] +; CHECK-GI-NEXT: mov b19, v2.b[3] +; CHECK-GI-NEXT: mov b0, v2.b[4] +; CHECK-GI-NEXT: fmov w9, s25 +; CHECK-GI-NEXT: fmov w12, s22 +; CHECK-GI-NEXT: sxtb w10, w8 +; CHECK-GI-NEXT: mov b21, v3.b[2] +; CHECK-GI-NEXT: fmov w13, s23 +; CHECK-GI-NEXT: mov b20, v3.b[3] +; CHECK-GI-NEXT: mov b17, v3.b[4] +; CHECK-GI-NEXT: fmov w8, s24 +; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: sxtb w12, w12 +; CHECK-GI-NEXT: mov v1.h[1], w10 +; CHECK-GI-NEXT: sxtb w13, w13 +; CHECK-GI-NEXT: fmov w10, s2 +; CHECK-GI-NEXT: fmov s2, w11 +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: fmov w11, s3 +; CHECK-GI-NEXT: sxtb w10, w10 +; CHECK-GI-NEXT: mov v2.h[1], w9 +; CHECK-GI-NEXT: fmov w9, s16 +; CHECK-GI-NEXT: sxtb w11, w11 +; CHECK-GI-NEXT: mov v1.h[2], w8 +; CHECK-GI-NEXT: fmov w8, s7 +; CHECK-GI-NEXT: fmov s3, w10 +; CHECK-GI-NEXT: fmov w10, s18 +; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: fmov s22, w11 +; CHECK-GI-NEXT: fmov w11, s21 +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: mov v3.h[1], w12 +; CHECK-GI-NEXT: sxtb w10, w10 +; CHECK-GI-NEXT: mov v2.h[2], w9 +; CHECK-GI-NEXT: mov v22.h[1], w13 +; CHECK-GI-NEXT: sxtb w11, w11 +; CHECK-GI-NEXT: fmov w9, s19 +; CHECK-GI-NEXT: fmov w12, s6 +; CHECK-GI-NEXT: mov v3.h[2], w10 +; CHECK-GI-NEXT: fmov w10, s20 +; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: mov v22.h[2], w11 +; CHECK-GI-NEXT: sxtb w12, w12 +; CHECK-GI-NEXT: fmov w11, s4 +; CHECK-GI-NEXT: mov v2.h[3], w8 +; CHECK-GI-NEXT: fmov w8, s5 +; CHECK-GI-NEXT: sxtb w10, w10 +; CHECK-GI-NEXT: mov v1.h[3], w12 +; CHECK-GI-NEXT: mov v3.h[3], w9 +; CHECK-GI-NEXT: fmov w9, s0 +; CHECK-GI-NEXT: sxtb w11, w11 +; CHECK-GI-NEXT: mov v22.h[3], w10 +; CHECK-GI-NEXT: fmov w10, s17 +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: mov v1.h[4], w11 +; CHECK-GI-NEXT: mov v2.h[4], w8 +; CHECK-GI-NEXT: sxtb w10, w10 +; 
CHECK-GI-NEXT: mov v3.h[4], w9 +; CHECK-GI-NEXT: mov v22.h[4], w10 +; CHECK-GI-NEXT: mul v0.8h, v1.8h, v2.8h +; CHECK-GI-NEXT: mul v1.8h, v3.8h, v22.8h +; CHECK-GI-NEXT: smov w8, v0.h[0] +; CHECK-GI-NEXT: smov w9, v0.h[4] +; CHECK-GI-NEXT: smov w11, v0.h[1] +; CHECK-GI-NEXT: smov w10, v1.h[0] +; CHECK-GI-NEXT: smov w12, v1.h[4] +; CHECK-GI-NEXT: smov w13, v1.h[1] +; CHECK-GI-NEXT: fmov s2, w8 +; CHECK-GI-NEXT: fmov s3, w9 +; CHECK-GI-NEXT: smov w8, v0.h[2] +; CHECK-GI-NEXT: smov w9, v1.h[2] +; CHECK-GI-NEXT: fmov s4, w10 +; CHECK-GI-NEXT: fmov s5, w12 +; CHECK-GI-NEXT: mov v2.s[1], w11 +; CHECK-GI-NEXT: mov v3.s[1], wzr +; CHECK-GI-NEXT: smov w10, v0.h[3] +; CHECK-GI-NEXT: smov w11, v1.h[3] +; CHECK-GI-NEXT: mov v4.s[1], w13 +; CHECK-GI-NEXT: mov v5.s[1], wzr +; CHECK-GI-NEXT: mov v2.s[2], w8 +; CHECK-GI-NEXT: mov v3.s[2], wzr +; CHECK-GI-NEXT: mov v4.s[2], w9 +; CHECK-GI-NEXT: mov v5.s[2], wzr +; CHECK-GI-NEXT: mov v2.s[3], w10 +; CHECK-GI-NEXT: mov v3.s[3], wzr +; CHECK-GI-NEXT: mov v4.s[3], w11 +; CHECK-GI-NEXT: mov v5.s[3], wzr +; CHECK-GI-NEXT: add v0.4s, v2.4s, v3.4s +; CHECK-GI-NEXT: add v1.4s, v4.4s, v5.4s +; CHECK-GI-NEXT: addv s0, v0.4s +; CHECK-GI-NEXT: addv s1, v1.4s ; CHECK-GI-NEXT: fmov w8, s0 ; CHECK-GI-NEXT: fmov w9, s1 ; CHECK-GI-NEXT: add w0, w8, w9 @@ -2303,11 +2379,14 @@ define i32 @test_udot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b ; ; CHECK-GI-LABEL: test_udot_v25i8: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: stp x26, x25, [sp, #-64]! // 16-byte Folded Spill -; CHECK-GI-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 64 +; CHECK-GI-NEXT: sub sp, sp, #112 +; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 112 ; CHECK-GI-NEXT: .cfi_offset w19, -8 ; CHECK-GI-NEXT: .cfi_offset w20, -16 ; CHECK-GI-NEXT: .cfi_offset w21, -24 @@ -2316,132 +2395,282 @@ define i32 @test_udot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b ; CHECK-GI-NEXT: .cfi_offset w24, -48 ; CHECK-GI-NEXT: .cfi_offset w25, -56 ; CHECK-GI-NEXT: .cfi_offset w26, -64 -; CHECK-GI-NEXT: ldp q1, q7, [x1] +; CHECK-GI-NEXT: .cfi_offset w27, -72 +; CHECK-GI-NEXT: .cfi_offset w28, -80 +; CHECK-GI-NEXT: .cfi_offset w30, -88 +; CHECK-GI-NEXT: .cfi_offset w29, -96 +; CHECK-GI-NEXT: ldp q2, q1, [x1] ; CHECK-GI-NEXT: fmov s0, wzr -; CHECK-GI-NEXT: ldp q16, q3, [x0] -; CHECK-GI-NEXT: umov w9, v1.b[4] -; CHECK-GI-NEXT: umov w11, v1.b[5] -; CHECK-GI-NEXT: umov w18, v1.b[0] -; CHECK-GI-NEXT: umov w0, v1.b[12] -; CHECK-GI-NEXT: umov w3, v7.b[4] -; CHECK-GI-NEXT: umov w12, v1.b[1] -; CHECK-GI-NEXT: umov w13, v1.b[6] -; CHECK-GI-NEXT: umov w1, v1.b[13] -; CHECK-GI-NEXT: umov w4, v7.b[5] -; CHECK-GI-NEXT: umov w15, v1.b[2] -; CHECK-GI-NEXT: umov w8, v1.b[3] -; CHECK-GI-NEXT: umov w16, v1.b[7] -; CHECK-GI-NEXT: fmov s2, w9 -; CHECK-GI-NEXT: umov w14, v1.b[8] -; CHECK-GI-NEXT: umov w17, v1.b[9] -; CHECK-GI-NEXT: umov w10, v1.b[10] -; CHECK-GI-NEXT: umov w9, v1.b[11] -; CHECK-GI-NEXT: umov w5, v1.b[14] -; CHECK-GI-NEXT: umov w6, v7.b[0] -; 
CHECK-GI-NEXT: fmov s4, w0 -; CHECK-GI-NEXT: fmov s5, w3 -; CHECK-GI-NEXT: mov v2.s[1], w11 -; CHECK-GI-NEXT: umov w11, v1.b[15] -; CHECK-GI-NEXT: fmov s1, w18 -; CHECK-GI-NEXT: umov w7, v7.b[1] -; CHECK-GI-NEXT: umov w18, v7.b[6] -; CHECK-GI-NEXT: umov w21, v16.b[4] -; CHECK-GI-NEXT: mov v4.s[1], w1 -; CHECK-GI-NEXT: mov v5.s[1], w4 -; CHECK-GI-NEXT: fmov s6, w14 -; CHECK-GI-NEXT: mov v1.s[1], w12 -; CHECK-GI-NEXT: umov w12, v7.b[3] -; CHECK-GI-NEXT: umov w14, v7.b[7] -; CHECK-GI-NEXT: mov v2.s[2], w13 -; CHECK-GI-NEXT: umov w13, v7.b[2] -; CHECK-GI-NEXT: umov w0, v7.b[8] -; CHECK-GI-NEXT: fmov s7, w6 -; CHECK-GI-NEXT: umov w23, v16.b[12] -; CHECK-GI-NEXT: umov w25, v3.b[4] -; CHECK-GI-NEXT: mov v6.s[1], w17 -; CHECK-GI-NEXT: mov v4.s[2], w5 -; CHECK-GI-NEXT: mov v5.s[2], w18 -; CHECK-GI-NEXT: mov v1.s[2], w15 -; CHECK-GI-NEXT: umov w6, v16.b[0] -; CHECK-GI-NEXT: umov w3, v16.b[1] -; CHECK-GI-NEXT: mov v2.s[3], w16 -; CHECK-GI-NEXT: mov v7.s[1], w7 -; CHECK-GI-NEXT: umov w16, v16.b[2] -; CHECK-GI-NEXT: umov w15, v16.b[3] -; CHECK-GI-NEXT: umov w22, v16.b[5] -; CHECK-GI-NEXT: umov w5, v16.b[6] -; CHECK-GI-NEXT: umov w18, v16.b[7] -; CHECK-GI-NEXT: umov w19, v16.b[8] -; CHECK-GI-NEXT: umov w7, v16.b[9] -; CHECK-GI-NEXT: umov w24, v16.b[13] -; CHECK-GI-NEXT: umov w1, v16.b[10] -; CHECK-GI-NEXT: umov w17, v16.b[11] -; CHECK-GI-NEXT: umov w20, v16.b[14] -; CHECK-GI-NEXT: umov w4, v16.b[15] -; CHECK-GI-NEXT: fmov s16, w21 -; CHECK-GI-NEXT: umov w21, v3.b[8] -; CHECK-GI-NEXT: umov w26, v3.b[5] -; CHECK-GI-NEXT: fmov s17, w23 -; CHECK-GI-NEXT: umov w23, v3.b[0] -; CHECK-GI-NEXT: fmov s18, w25 -; CHECK-GI-NEXT: umov w25, v3.b[3] -; CHECK-GI-NEXT: mov v16.s[1], w22 -; CHECK-GI-NEXT: umov w22, v3.b[1] -; CHECK-GI-NEXT: fmov s19, w6 -; CHECK-GI-NEXT: mov v17.s[1], w24 -; CHECK-GI-NEXT: umov w24, v3.b[2] -; CHECK-GI-NEXT: umov w6, v3.b[7] -; CHECK-GI-NEXT: mul w0, w0, w21 -; CHECK-GI-NEXT: mov v18.s[1], w26 -; CHECK-GI-NEXT: umov w26, v3.b[6] -; CHECK-GI-NEXT: fmov s3, w19 -; CHECK-GI-NEXT: fmov s20, w23 -; CHECK-GI-NEXT: mov v19.s[1], w3 -; CHECK-GI-NEXT: mov v16.s[2], w5 +; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill +; CHECK-GI-NEXT: mov b6, v2.b[3] +; CHECK-GI-NEXT: mov b7, v2.b[4] +; CHECK-GI-NEXT: mov b16, v2.b[5] +; CHECK-GI-NEXT: mov b19, v2.b[8] +; CHECK-GI-NEXT: mov b4, v2.b[1] +; CHECK-GI-NEXT: mov b5, v2.b[2] +; CHECK-GI-NEXT: mov b17, v2.b[6] +; CHECK-GI-NEXT: mov b18, v2.b[7] +; CHECK-GI-NEXT: mov b20, v2.b[9] +; CHECK-GI-NEXT: mov b21, v2.b[10] +; CHECK-GI-NEXT: mov b22, v2.b[11] +; CHECK-GI-NEXT: fmov w7, s2 +; CHECK-GI-NEXT: fmov w13, s6 +; CHECK-GI-NEXT: mov b6, v2.b[12] +; CHECK-GI-NEXT: fmov w2, s7 +; CHECK-GI-NEXT: mov b7, v2.b[13] +; CHECK-GI-NEXT: fmov w11, s16 +; CHECK-GI-NEXT: mov b16, v2.b[14] +; CHECK-GI-NEXT: mov b23, v2.b[15] +; CHECK-GI-NEXT: ldp q3, q2, [x0] +; CHECK-GI-NEXT: fmov w26, s19 +; CHECK-GI-NEXT: fmov w19, s4 +; CHECK-GI-NEXT: stp s17, s18, [sp, #4] // 8-byte Folded Spill +; CHECK-GI-NEXT: fmov w29, s5 +; CHECK-GI-NEXT: fmov w24, s20 +; CHECK-GI-NEXT: uxtb w8, w7 +; CHECK-GI-NEXT: mov b4, v3.b[2] +; CHECK-GI-NEXT: mov b5, v3.b[1] +; CHECK-GI-NEXT: uxtb w13, w13 +; CHECK-GI-NEXT: mov b17, v1.b[1] +; CHECK-GI-NEXT: fmov w22, s21 +; CHECK-GI-NEXT: uxtb w26, w26 +; CHECK-GI-NEXT: mov b18, v1.b[2] +; CHECK-GI-NEXT: fmov w18, s22 +; CHECK-GI-NEXT: uxtb w24, w24 +; CHECK-GI-NEXT: mov b19, v1.b[3] +; CHECK-GI-NEXT: fmov w16, s6 +; CHECK-GI-NEXT: uxtb w19, w19 +; CHECK-GI-NEXT: mov b21, v1.b[4] +; CHECK-GI-NEXT: fmov w15, s7 +; CHECK-GI-NEXT: uxtb w22, w22 
+; CHECK-GI-NEXT: mov b7, v1.b[5] +; CHECK-GI-NEXT: mov b6, v3.b[3] +; CHECK-GI-NEXT: uxtb w11, w11 +; CHECK-GI-NEXT: fmov w12, s23 +; CHECK-GI-NEXT: mov b22, v1.b[6] +; CHECK-GI-NEXT: mov b23, v1.b[7] +; CHECK-GI-NEXT: mov b20, v3.b[4] +; CHECK-GI-NEXT: fmov w28, s4 +; CHECK-GI-NEXT: fmov s4, w26 +; CHECK-GI-NEXT: fmov w14, s16 +; CHECK-GI-NEXT: fmov w27, s17 +; CHECK-GI-NEXT: fmov w5, s18 +; CHECK-GI-NEXT: uxtb w12, w12 +; CHECK-GI-NEXT: fmov w4, s19 +; CHECK-GI-NEXT: mov b19, v3.b[5] +; CHECK-GI-NEXT: uxtb w28, w28 +; CHECK-GI-NEXT: fmov w3, s21 +; CHECK-GI-NEXT: mov b18, v3.b[6] +; CHECK-GI-NEXT: uxtb w27, w27 +; CHECK-GI-NEXT: uxtb w5, w5 +; CHECK-GI-NEXT: fmov w1, s7 +; CHECK-GI-NEXT: mov b16, v3.b[7] +; CHECK-GI-NEXT: fmov w0, s22 +; CHECK-GI-NEXT: mov b17, v3.b[8] +; CHECK-GI-NEXT: fmov w17, s23 +; CHECK-GI-NEXT: mov b7, v3.b[9] +; CHECK-GI-NEXT: fmov w30, s5 +; CHECK-GI-NEXT: mov b5, v3.b[10] +; CHECK-GI-NEXT: mov b21, v3.b[11] +; CHECK-GI-NEXT: fmov w25, s6 +; CHECK-GI-NEXT: mov b6, v3.b[12] +; CHECK-GI-NEXT: fmov w23, s20 +; CHECK-GI-NEXT: mov b20, v3.b[13] +; CHECK-GI-NEXT: mov b22, v3.b[14] +; CHECK-GI-NEXT: fmov w6, s3 +; CHECK-GI-NEXT: mov b23, v3.b[15] +; CHECK-GI-NEXT: fmov s3, w8 +; CHECK-GI-NEXT: fmov w8, s1 +; CHECK-GI-NEXT: mov v4.h[1], w24 +; CHECK-GI-NEXT: fmov w21, s19 +; CHECK-GI-NEXT: mov b19, v2.b[1] +; CHECK-GI-NEXT: fmov w9, s17 +; CHECK-GI-NEXT: fmov w24, s6 +; CHECK-GI-NEXT: fmov w7, s16 +; CHECK-GI-NEXT: mov b16, v2.b[2] +; CHECK-GI-NEXT: uxtb w8, w8 +; CHECK-GI-NEXT: mov v3.h[1], w19 +; CHECK-GI-NEXT: uxtb w19, w29 +; CHECK-GI-NEXT: uxtb w9, w9 +; CHECK-GI-NEXT: fmov w29, s5 +; CHECK-GI-NEXT: mov v4.h[2], w22 +; CHECK-GI-NEXT: uxtb w22, w6 +; CHECK-GI-NEXT: fmov s5, w8 +; CHECK-GI-NEXT: fmov w10, s7 +; CHECK-GI-NEXT: fmov s7, w9 +; CHECK-GI-NEXT: fmov w9, s16 +; CHECK-GI-NEXT: fmov w20, s18 +; CHECK-GI-NEXT: uxtb w29, w29 +; CHECK-GI-NEXT: fmov s6, w22 +; CHECK-GI-NEXT: fmov w22, s2 +; CHECK-GI-NEXT: uxtb w10, w10 +; CHECK-GI-NEXT: mov v5.h[1], w27 +; CHECK-GI-NEXT: uxtb w27, w30 +; CHECK-GI-NEXT: uxtb w9, w9 +; CHECK-GI-NEXT: mov b18, v2.b[3] +; CHECK-GI-NEXT: mov v3.h[2], w19 +; CHECK-GI-NEXT: uxtb w22, w22 +; CHECK-GI-NEXT: mov v6.h[1], w27 +; CHECK-GI-NEXT: fmov w27, s19 +; CHECK-GI-NEXT: mov v7.h[1], w10 +; CHECK-GI-NEXT: fmov w26, s21 +; CHECK-GI-NEXT: mov b17, v2.b[4] +; CHECK-GI-NEXT: fmov s16, w22 +; CHECK-GI-NEXT: mov v5.h[2], w5 +; CHECK-GI-NEXT: uxtb w5, w25 +; CHECK-GI-NEXT: uxtb w27, w27 +; CHECK-GI-NEXT: fmov w10, s18 +; CHECK-GI-NEXT: mov v3.h[3], w13 +; CHECK-GI-NEXT: uxtb w13, w4 +; CHECK-GI-NEXT: mov v6.h[2], w28 +; CHECK-GI-NEXT: fmov w8, s20 +; CHECK-GI-NEXT: mov v16.h[1], w27 +; CHECK-GI-NEXT: mov v7.h[2], w29 +; CHECK-GI-NEXT: mov b20, v2.b[5] +; CHECK-GI-NEXT: uxtb w10, w10 +; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: uxtb w8, w8 +; CHECK-GI-NEXT: fmov w22, s17 +; CHECK-GI-NEXT: mov v5.h[3], w13 +; CHECK-GI-NEXT: uxtb w13, w2 +; CHECK-GI-NEXT: mov v6.h[3], w5 +; CHECK-GI-NEXT: mov b21, v2.b[6] +; CHECK-GI-NEXT: mov v16.h[2], w9 +; CHECK-GI-NEXT: uxtb w9, w18 +; CHECK-GI-NEXT: uxtb w18, w23 +; CHECK-GI-NEXT: mov v3.h[4], w13 +; CHECK-GI-NEXT: uxtb w13, w24 +; CHECK-GI-NEXT: fmov w27, s20 +; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v4.h[3], w9 +; CHECK-GI-NEXT: uxtb w9, w26 +; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v16.h[3], w10 +; CHECK-GI-NEXT: uxtb w10, w3 +; CHECK-GI-NEXT: mov v6.h[4], w18 +; 
CHECK-GI-NEXT: ldr w18, [sp, #4] // 4-byte Folded Reload +; CHECK-GI-NEXT: mov v7.h[3], w9 +; CHECK-GI-NEXT: uxtb w9, w16 +; CHECK-GI-NEXT: uxtb w16, w22 +; CHECK-GI-NEXT: mov v5.h[4], w10 +; CHECK-GI-NEXT: uxtb w10, w15 +; CHECK-GI-NEXT: uxtb w18, w18 +; CHECK-GI-NEXT: mov v4.h[4], w9 +; CHECK-GI-NEXT: uxtb w9, w21 +; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v16.h[4], w16 +; CHECK-GI-NEXT: mov v7.h[4], w13 +; CHECK-GI-NEXT: ldr w13, [sp, #8] // 4-byte Folded Reload +; CHECK-GI-NEXT: mov v6.h[5], w9 +; CHECK-GI-NEXT: uxtb w9, w1 +; CHECK-GI-NEXT: mov v3.h[5], w11 +; CHECK-GI-NEXT: uxtb w11, w27 +; CHECK-GI-NEXT: fmov w19, s22 +; CHECK-GI-NEXT: fmov w28, s21 +; CHECK-GI-NEXT: uxtb w13, w13 +; CHECK-GI-NEXT: mov b17, v2.b[7] +; CHECK-GI-NEXT: mov v5.h[5], w9 +; CHECK-GI-NEXT: uxtb w9, w0 +; CHECK-GI-NEXT: mov v4.h[5], w10 +; CHECK-GI-NEXT: uxtb w10, w20 +; CHECK-GI-NEXT: mov v7.h[5], w8 +; CHECK-GI-NEXT: mov v16.h[5], w11 +; CHECK-GI-NEXT: uxtb w8, w14 +; CHECK-GI-NEXT: uxtb w11, w28 +; CHECK-GI-NEXT: mov v6.h[6], w10 +; CHECK-GI-NEXT: uxtb w10, w19 +; CHECK-GI-NEXT: fmov w6, s23 +; CHECK-GI-NEXT: mov v5.h[6], w9 +; CHECK-GI-NEXT: fmov w9, s17 +; CHECK-GI-NEXT: mov v3.h[6], w18 +; CHECK-GI-NEXT: mov v4.h[6], w8 +; CHECK-GI-NEXT: uxtb w8, w7 +; CHECK-GI-NEXT: mov v7.h[6], w10 +; CHECK-GI-NEXT: mov v16.h[6], w11 +; CHECK-GI-NEXT: uxtb w10, w6 ; CHECK-GI-NEXT: mov v0.s[1], wzr -; CHECK-GI-NEXT: mov v6.s[2], w10 -; CHECK-GI-NEXT: fmov s21, w0 -; CHECK-GI-NEXT: mov v17.s[2], w20 -; CHECK-GI-NEXT: mov v4.s[3], w11 -; CHECK-GI-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov v3.s[1], w7 -; CHECK-GI-NEXT: mov v20.s[1], w22 -; CHECK-GI-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov v18.s[2], w26 -; CHECK-GI-NEXT: mov v21.s[1], wzr -; CHECK-GI-NEXT: mov v16.s[3], w18 -; CHECK-GI-NEXT: mov v17.s[3], w4 -; CHECK-GI-NEXT: mov v7.s[2], w13 -; CHECK-GI-NEXT: mov v5.s[3], w14 -; CHECK-GI-NEXT: mov v19.s[2], w16 -; CHECK-GI-NEXT: mov v3.s[2], w1 +; CHECK-GI-NEXT: mov v6.h[7], w8 +; CHECK-GI-NEXT: uxtb w8, w17 +; CHECK-GI-NEXT: uxtb w9, w9 +; CHECK-GI-NEXT: mov v3.h[7], w13 +; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v4.h[7], w12 +; CHECK-GI-NEXT: mov v5.h[7], w8 +; CHECK-GI-NEXT: mov v7.h[7], w10 +; CHECK-GI-NEXT: mov v16.h[7], w9 +; CHECK-GI-NEXT: umov w8, v1.b[8] +; CHECK-GI-NEXT: umov w9, v2.b[8] ; CHECK-GI-NEXT: mov v0.s[2], wzr -; CHECK-GI-NEXT: mov v20.s[2], w24 -; CHECK-GI-NEXT: ldp x24, x23, [sp, #16] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov v18.s[3], w6 -; CHECK-GI-NEXT: mov v21.s[2], wzr -; CHECK-GI-NEXT: mul v2.4s, v2.4s, v16.4s -; CHECK-GI-NEXT: mul v4.4s, v4.4s, v17.4s -; CHECK-GI-NEXT: mov v1.s[3], w8 -; CHECK-GI-NEXT: mov v6.s[3], w9 -; CHECK-GI-NEXT: mov v7.s[3], w12 -; CHECK-GI-NEXT: mov v19.s[3], w15 -; CHECK-GI-NEXT: mov v3.s[3], w17 -; CHECK-GI-NEXT: mov v20.s[3], w25 +; CHECK-GI-NEXT: mul v3.8h, v3.8h, v6.8h +; CHECK-GI-NEXT: mul v2.8h, v4.8h, v7.8h +; CHECK-GI-NEXT: mul v1.8h, v5.8h, v16.8h +; CHECK-GI-NEXT: mul w15, w8, w9 ; CHECK-GI-NEXT: mov v0.s[3], wzr -; CHECK-GI-NEXT: mul v5.4s, v5.4s, v18.4s -; CHECK-GI-NEXT: mov v21.s[3], wzr -; CHECK-GI-NEXT: mla v2.4s, v1.4s, v19.4s -; CHECK-GI-NEXT: mla v4.4s, v6.4s, v3.4s -; CHECK-GI-NEXT: mla v5.4s, v7.4s, v20.4s -; CHECK-GI-NEXT: add v0.4s, v21.4s, v0.4s -; CHECK-GI-NEXT: add v1.4s, v2.4s, v4.4s -; CHECK-GI-NEXT: add v0.4s, v5.4s, v0.4s +; CHECK-GI-NEXT: umov w16, v3.h[0] +; 
CHECK-GI-NEXT: umov w18, v3.h[4] +; CHECK-GI-NEXT: umov w17, v3.h[1] +; CHECK-GI-NEXT: umov w1, v2.h[0] +; CHECK-GI-NEXT: umov w3, v2.h[4] +; CHECK-GI-NEXT: umov w0, v3.h[5] +; CHECK-GI-NEXT: umov w5, v1.h[0] +; CHECK-GI-NEXT: umov w7, v1.h[4] +; CHECK-GI-NEXT: umov w2, v2.h[1] +; CHECK-GI-NEXT: umov w4, v2.h[5] +; CHECK-GI-NEXT: umov w6, v1.h[1] +; CHECK-GI-NEXT: umov w19, v1.h[5] +; CHECK-GI-NEXT: umov w10, v3.h[2] +; CHECK-GI-NEXT: umov w8, v3.h[3] +; CHECK-GI-NEXT: umov w11, v3.h[6] +; CHECK-GI-NEXT: umov w9, v3.h[7] +; CHECK-GI-NEXT: fmov s3, w16 +; CHECK-GI-NEXT: fmov s4, w18 +; CHECK-GI-NEXT: fmov s5, w1 +; CHECK-GI-NEXT: fmov s6, w3 +; CHECK-GI-NEXT: fmov s7, w5 +; CHECK-GI-NEXT: fmov s16, w7 +; CHECK-GI-NEXT: fmov s17, w15 +; CHECK-GI-NEXT: umov w12, v2.h[2] +; CHECK-GI-NEXT: umov w13, v2.h[6] +; CHECK-GI-NEXT: umov w14, v1.h[2] +; CHECK-GI-NEXT: umov w16, v1.h[6] +; CHECK-GI-NEXT: mov v3.s[1], w17 +; CHECK-GI-NEXT: mov v4.s[1], w0 +; CHECK-GI-NEXT: mov v5.s[1], w2 +; CHECK-GI-NEXT: mov v6.s[1], w4 +; CHECK-GI-NEXT: mov v7.s[1], w6 +; CHECK-GI-NEXT: mov v16.s[1], w19 +; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v17.s[1], wzr +; CHECK-GI-NEXT: umov w15, v2.h[3] +; CHECK-GI-NEXT: umov w17, v2.h[7] +; CHECK-GI-NEXT: umov w18, v1.h[3] +; CHECK-GI-NEXT: umov w0, v1.h[7] +; CHECK-GI-NEXT: mov v3.s[2], w10 +; CHECK-GI-NEXT: mov v4.s[2], w11 +; CHECK-GI-NEXT: mov v5.s[2], w12 +; CHECK-GI-NEXT: mov v6.s[2], w13 +; CHECK-GI-NEXT: mov v7.s[2], w14 +; CHECK-GI-NEXT: mov v16.s[2], w16 +; CHECK-GI-NEXT: mov v17.s[2], wzr +; CHECK-GI-NEXT: mov v3.s[3], w8 +; CHECK-GI-NEXT: mov v4.s[3], w9 +; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload +; CHECK-GI-NEXT: mov v5.s[3], w15 +; CHECK-GI-NEXT: mov v6.s[3], w17 +; CHECK-GI-NEXT: mov v7.s[3], w18 +; CHECK-GI-NEXT: mov v16.s[3], w0 +; CHECK-GI-NEXT: mov v17.s[3], wzr +; CHECK-GI-NEXT: add v1.4s, v3.4s, v4.4s +; CHECK-GI-NEXT: add v2.4s, v5.4s, v6.4s +; CHECK-GI-NEXT: add v3.4s, v7.4s, v16.4s +; CHECK-GI-NEXT: add v0.4s, v17.4s, v0.4s +; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s +; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s ; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s ; CHECK-GI-NEXT: addv s0, v0.4s ; CHECK-GI-NEXT: fmov w8, s0 -; CHECK-GI-NEXT: add w0, w8, w2 -; CHECK-GI-NEXT: ldp x26, x25, [sp], #64 // 16-byte Folded Reload +; CHECK-GI-NEXT: add w0, w8, w9 +; CHECK-GI-NEXT: add sp, sp, #112 ; CHECK-GI-NEXT: ret entry: %0 = load <25 x i8>, ptr %a @@ -2580,11 +2809,14 @@ define i32 @test_sdot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b ; ; CHECK-GI-LABEL: test_sdot_v25i8: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: stp x26, x25, [sp, #-64]! 
// 16-byte Folded Spill -; CHECK-GI-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 64 +; CHECK-GI-NEXT: sub sp, sp, #112 +; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 112 ; CHECK-GI-NEXT: .cfi_offset w19, -8 ; CHECK-GI-NEXT: .cfi_offset w20, -16 ; CHECK-GI-NEXT: .cfi_offset w21, -24 @@ -2593,132 +2825,283 @@ define i32 @test_sdot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b ; CHECK-GI-NEXT: .cfi_offset w24, -48 ; CHECK-GI-NEXT: .cfi_offset w25, -56 ; CHECK-GI-NEXT: .cfi_offset w26, -64 -; CHECK-GI-NEXT: ldp q1, q7, [x1] +; CHECK-GI-NEXT: .cfi_offset w27, -72 +; CHECK-GI-NEXT: .cfi_offset w28, -80 +; CHECK-GI-NEXT: .cfi_offset w30, -88 +; CHECK-GI-NEXT: .cfi_offset w29, -96 +; CHECK-GI-NEXT: ldp q2, q1, [x1] ; CHECK-GI-NEXT: fmov s0, wzr -; CHECK-GI-NEXT: ldp q16, q3, [x0] -; CHECK-GI-NEXT: smov w9, v1.b[4] -; CHECK-GI-NEXT: smov w11, v1.b[5] -; CHECK-GI-NEXT: smov w18, v1.b[0] -; CHECK-GI-NEXT: smov w0, v1.b[12] -; CHECK-GI-NEXT: smov w3, v7.b[4] -; CHECK-GI-NEXT: smov w12, v1.b[1] -; CHECK-GI-NEXT: smov w13, v1.b[6] -; CHECK-GI-NEXT: smov w1, v1.b[13] -; CHECK-GI-NEXT: smov w4, v7.b[5] -; CHECK-GI-NEXT: smov w15, v1.b[2] -; CHECK-GI-NEXT: smov w8, v1.b[3] -; CHECK-GI-NEXT: smov w16, v1.b[7] -; CHECK-GI-NEXT: fmov s2, w9 -; CHECK-GI-NEXT: smov w14, v1.b[8] -; CHECK-GI-NEXT: smov w17, v1.b[9] -; CHECK-GI-NEXT: smov w10, v1.b[10] -; CHECK-GI-NEXT: smov w9, v1.b[11] -; CHECK-GI-NEXT: smov w5, v1.b[14] -; CHECK-GI-NEXT: smov w6, v7.b[0] -; CHECK-GI-NEXT: fmov s4, w0 -; CHECK-GI-NEXT: fmov s5, w3 -; CHECK-GI-NEXT: mov v2.s[1], w11 -; CHECK-GI-NEXT: smov w11, v1.b[15] -; CHECK-GI-NEXT: fmov s1, w18 -; CHECK-GI-NEXT: smov w7, v7.b[1] -; CHECK-GI-NEXT: smov w18, v7.b[6] -; CHECK-GI-NEXT: smov w21, v16.b[4] -; CHECK-GI-NEXT: mov v4.s[1], w1 -; CHECK-GI-NEXT: mov v5.s[1], w4 -; CHECK-GI-NEXT: fmov s6, w14 -; CHECK-GI-NEXT: mov v1.s[1], w12 -; CHECK-GI-NEXT: smov w12, v7.b[3] -; CHECK-GI-NEXT: smov w14, v7.b[7] -; CHECK-GI-NEXT: mov v2.s[2], w13 -; CHECK-GI-NEXT: smov w13, v7.b[2] -; CHECK-GI-NEXT: smov w0, v7.b[8] -; CHECK-GI-NEXT: fmov s7, w6 -; CHECK-GI-NEXT: smov w23, v16.b[12] -; CHECK-GI-NEXT: smov w25, v3.b[4] -; CHECK-GI-NEXT: mov v6.s[1], w17 -; CHECK-GI-NEXT: mov v4.s[2], w5 -; CHECK-GI-NEXT: mov v5.s[2], w18 -; CHECK-GI-NEXT: mov v1.s[2], w15 -; CHECK-GI-NEXT: smov w6, v16.b[0] -; CHECK-GI-NEXT: smov w3, v16.b[1] -; CHECK-GI-NEXT: mov v2.s[3], w16 -; CHECK-GI-NEXT: mov v7.s[1], w7 -; CHECK-GI-NEXT: smov w16, v16.b[2] -; CHECK-GI-NEXT: smov w15, v16.b[3] -; CHECK-GI-NEXT: smov w22, v16.b[5] -; CHECK-GI-NEXT: smov w5, v16.b[6] -; CHECK-GI-NEXT: smov w18, v16.b[7] -; CHECK-GI-NEXT: smov w19, v16.b[8] -; CHECK-GI-NEXT: smov w7, v16.b[9] -; CHECK-GI-NEXT: smov w24, v16.b[13] -; CHECK-GI-NEXT: smov w1, v16.b[10] -; CHECK-GI-NEXT: smov w17, v16.b[11] -; CHECK-GI-NEXT: smov w20, v16.b[14] -; CHECK-GI-NEXT: smov w4, v16.b[15] -; CHECK-GI-NEXT: fmov s16, w21 -; CHECK-GI-NEXT: smov w21, v3.b[8] -; CHECK-GI-NEXT: smov w26, v3.b[5] -; 
CHECK-GI-NEXT: fmov s17, w23 -; CHECK-GI-NEXT: smov w23, v3.b[0] -; CHECK-GI-NEXT: fmov s18, w25 -; CHECK-GI-NEXT: smov w25, v3.b[3] -; CHECK-GI-NEXT: mov v16.s[1], w22 -; CHECK-GI-NEXT: smov w22, v3.b[1] -; CHECK-GI-NEXT: fmov s19, w6 -; CHECK-GI-NEXT: mov v17.s[1], w24 -; CHECK-GI-NEXT: smov w24, v3.b[2] -; CHECK-GI-NEXT: smov w6, v3.b[7] -; CHECK-GI-NEXT: mul w0, w0, w21 -; CHECK-GI-NEXT: mov v18.s[1], w26 -; CHECK-GI-NEXT: smov w26, v3.b[6] -; CHECK-GI-NEXT: fmov s3, w19 -; CHECK-GI-NEXT: fmov s20, w23 -; CHECK-GI-NEXT: mov v19.s[1], w3 -; CHECK-GI-NEXT: mov v16.s[2], w5 +; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill +; CHECK-GI-NEXT: mov b5, v2.b[2] +; CHECK-GI-NEXT: mov b6, v2.b[3] +; CHECK-GI-NEXT: mov b7, v2.b[4] +; CHECK-GI-NEXT: mov b16, v2.b[5] +; CHECK-GI-NEXT: mov b17, v2.b[6] +; CHECK-GI-NEXT: mov b18, v2.b[7] +; CHECK-GI-NEXT: mov b19, v2.b[8] +; CHECK-GI-NEXT: mov b20, v2.b[9] +; CHECK-GI-NEXT: mov b21, v2.b[15] +; CHECK-GI-NEXT: mov b3, v2.b[1] +; CHECK-GI-NEXT: fmov w19, s2 +; CHECK-GI-NEXT: mov b22, v1.b[6] +; CHECK-GI-NEXT: fmov w6, s5 +; CHECK-GI-NEXT: mov b5, v2.b[10] +; CHECK-GI-NEXT: fmov w14, s6 +; CHECK-GI-NEXT: mov b6, v2.b[11] +; CHECK-GI-NEXT: fmov w2, s7 +; CHECK-GI-NEXT: stp s17, s18, [sp, #4] // 8-byte Folded Spill +; CHECK-GI-NEXT: mov b7, v2.b[12] +; CHECK-GI-NEXT: fmov w11, s16 +; CHECK-GI-NEXT: sxtb w28, w19 +; CHECK-GI-NEXT: mov b16, v2.b[13] +; CHECK-GI-NEXT: mov b18, v1.b[1] +; CHECK-GI-NEXT: sxtb w6, w6 +; CHECK-GI-NEXT: mov b17, v2.b[14] +; CHECK-GI-NEXT: ldp q4, q2, [x0] +; CHECK-GI-NEXT: fmov w25, s19 +; CHECK-GI-NEXT: fmov w24, s20 +; CHECK-GI-NEXT: fmov w22, s5 +; CHECK-GI-NEXT: mov b5, v1.b[2] +; CHECK-GI-NEXT: fmov w0, s6 +; CHECK-GI-NEXT: sxtb w14, w14 +; CHECK-GI-NEXT: mov b20, v1.b[3] +; CHECK-GI-NEXT: fmov w16, s7 +; CHECK-GI-NEXT: mov b7, v1.b[4] +; CHECK-GI-NEXT: fmov w15, s16 +; CHECK-GI-NEXT: sxtb w25, w25 +; CHECK-GI-NEXT: sxtb w24, w24 +; CHECK-GI-NEXT: mov b16, v1.b[5] +; CHECK-GI-NEXT: fmov w13, s21 +; CHECK-GI-NEXT: sxtb w22, w22 +; CHECK-GI-NEXT: mov b6, v4.b[2] +; CHECK-GI-NEXT: fmov w26, s18 +; CHECK-GI-NEXT: sxtb w0, w0 +; CHECK-GI-NEXT: mov b21, v1.b[7] +; CHECK-GI-NEXT: mov b18, v4.b[4] +; CHECK-GI-NEXT: fmov w7, s3 +; CHECK-GI-NEXT: mov b3, v4.b[1] +; CHECK-GI-NEXT: fmov w12, s17 +; CHECK-GI-NEXT: fmov w5, s5 +; CHECK-GI-NEXT: mov b19, v4.b[3] +; CHECK-GI-NEXT: fmov w4, s20 +; CHECK-GI-NEXT: fmov w3, s7 +; CHECK-GI-NEXT: sxtb w29, w7 +; CHECK-GI-NEXT: mov b17, v4.b[5] +; CHECK-GI-NEXT: fmov w1, s16 +; CHECK-GI-NEXT: sxtb w5, w5 +; CHECK-GI-NEXT: mov b16, v4.b[6] +; CHECK-GI-NEXT: fmov w18, s22 +; CHECK-GI-NEXT: mov b7, v4.b[7] +; CHECK-GI-NEXT: fmov w17, s21 +; CHECK-GI-NEXT: mov b5, v4.b[8] +; CHECK-GI-NEXT: mov b20, v4.b[9] +; CHECK-GI-NEXT: fmov w27, s6 +; CHECK-GI-NEXT: mov b6, v4.b[10] +; CHECK-GI-NEXT: mov b21, v4.b[11] +; CHECK-GI-NEXT: fmov w21, s18 +; CHECK-GI-NEXT: mov b18, v4.b[12] +; CHECK-GI-NEXT: mov b22, v4.b[13] +; CHECK-GI-NEXT: mov b23, v4.b[14] +; CHECK-GI-NEXT: fmov w10, s4 +; CHECK-GI-NEXT: sxtb w27, w27 +; CHECK-GI-NEXT: mov b24, v4.b[15] +; CHECK-GI-NEXT: fmov s4, w25 +; CHECK-GI-NEXT: fmov w30, s3 +; CHECK-GI-NEXT: fmov s3, w28 +; CHECK-GI-NEXT: fmov w9, s5 +; CHECK-GI-NEXT: sxtb w10, w10 +; CHECK-GI-NEXT: fmov w7, s7 +; CHECK-GI-NEXT: mov b7, v2.b[1] +; CHECK-GI-NEXT: mov v4.h[1], w24 +; CHECK-GI-NEXT: fmov w24, s1 +; CHECK-GI-NEXT: fmov w8, s20 +; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: mov v3.h[1], w29 +; CHECK-GI-NEXT: fmov w29, s6 +; CHECK-GI-NEXT: fmov s6, w10 +; CHECK-GI-NEXT: 
fmov w10, s2 +; CHECK-GI-NEXT: fmov w19, s16 +; CHECK-GI-NEXT: sxtb w24, w24 +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: mov b16, v2.b[3] +; CHECK-GI-NEXT: sxtb w29, w29 +; CHECK-GI-NEXT: fmov w23, s19 +; CHECK-GI-NEXT: mov b19, v2.b[2] +; CHECK-GI-NEXT: sxtb w10, w10 +; CHECK-GI-NEXT: fmov s5, w24 +; CHECK-GI-NEXT: sxtb w24, w30 +; CHECK-GI-NEXT: mov v3.h[2], w6 +; CHECK-GI-NEXT: sxtb w6, w26 +; CHECK-GI-NEXT: fmov w28, s21 +; CHECK-GI-NEXT: sxtb w23, w23 +; CHECK-GI-NEXT: mov v6.h[1], w24 +; CHECK-GI-NEXT: fmov w24, s7 +; CHECK-GI-NEXT: fmov s7, w9 +; CHECK-GI-NEXT: fmov w9, s19 +; CHECK-GI-NEXT: mov v5.h[1], w6 +; CHECK-GI-NEXT: mov v4.h[2], w22 +; CHECK-GI-NEXT: fmov w20, s17 +; CHECK-GI-NEXT: mov b17, v2.b[4] +; CHECK-GI-NEXT: sxtb w24, w24 +; CHECK-GI-NEXT: mov v3.h[3], w14 +; CHECK-GI-NEXT: sxtb w14, w2 +; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: mov v7.h[1], w8 +; CHECK-GI-NEXT: fmov w8, s16 +; CHECK-GI-NEXT: fmov s16, w10 +; CHECK-GI-NEXT: mov v6.h[2], w27 +; CHECK-GI-NEXT: mov v5.h[2], w5 +; CHECK-GI-NEXT: fmov w25, s18 +; CHECK-GI-NEXT: mov v4.h[3], w0 +; CHECK-GI-NEXT: sxtb w0, w4 +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: mov b18, v2.b[5] +; CHECK-GI-NEXT: fmov w10, s17 +; CHECK-GI-NEXT: mov v16.h[1], w24 +; CHECK-GI-NEXT: mov v7.h[2], w29 +; CHECK-GI-NEXT: mov v3.h[4], w14 +; CHECK-GI-NEXT: sxtb w14, w25 +; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: sxtb w10, w10 +; CHECK-GI-NEXT: mov v6.h[3], w23 +; CHECK-GI-NEXT: mov v5.h[3], w0 +; CHECK-GI-NEXT: fmov w26, s22 +; CHECK-GI-NEXT: mov b19, v2.b[6] +; CHECK-GI-NEXT: fmov w27, s18 +; CHECK-GI-NEXT: mov v16.h[2], w9 +; CHECK-GI-NEXT: sxtb w9, w28 +; CHECK-GI-NEXT: fmov w22, s23 +; CHECK-GI-NEXT: mov b17, v2.b[7] +; CHECK-GI-NEXT: fmov w6, s24 ; CHECK-GI-NEXT: mov v0.s[1], wzr -; CHECK-GI-NEXT: mov v6.s[2], w10 -; CHECK-GI-NEXT: fmov s21, w0 -; CHECK-GI-NEXT: mov v17.s[2], w20 -; CHECK-GI-NEXT: mov v4.s[3], w11 -; CHECK-GI-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov v3.s[1], w7 -; CHECK-GI-NEXT: mov v20.s[1], w22 -; CHECK-GI-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov v18.s[2], w26 -; CHECK-GI-NEXT: mov v21.s[1], wzr -; CHECK-GI-NEXT: mov v16.s[3], w18 -; CHECK-GI-NEXT: mov v17.s[3], w4 -; CHECK-GI-NEXT: mov v7.s[2], w13 -; CHECK-GI-NEXT: mov v5.s[3], w14 -; CHECK-GI-NEXT: mov v19.s[2], w16 -; CHECK-GI-NEXT: mov v3.s[2], w1 +; CHECK-GI-NEXT: mov v7.h[3], w9 +; CHECK-GI-NEXT: sxtb w9, w11 +; CHECK-GI-NEXT: sxtb w11, w21 +; CHECK-GI-NEXT: fmov w24, s19 +; CHECK-GI-NEXT: mov v16.h[3], w8 +; CHECK-GI-NEXT: sxtb w8, w16 +; CHECK-GI-NEXT: sxtb w16, w3 +; CHECK-GI-NEXT: mov v6.h[4], w11 +; CHECK-GI-NEXT: ldr w11, [sp, #4] // 4-byte Folded Reload +; CHECK-GI-NEXT: mov v3.h[5], w9 +; CHECK-GI-NEXT: sxtb w9, w15 +; CHECK-GI-NEXT: sxtb w15, w27 +; CHECK-GI-NEXT: mov v7.h[4], w14 +; CHECK-GI-NEXT: sxtb w14, w1 +; CHECK-GI-NEXT: sxtb w11, w11 +; CHECK-GI-NEXT: mov v4.h[4], w8 +; CHECK-GI-NEXT: sxtb w8, w20 +; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v5.h[4], w16 +; CHECK-GI-NEXT: mov v16.h[4], w10 +; CHECK-GI-NEXT: sxtb w10, w26 +; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v6.h[5], w8 +; CHECK-GI-NEXT: ldr w8, [sp, #8] // 4-byte Folded Reload +; CHECK-GI-NEXT: mov v7.h[5], w10 +; CHECK-GI-NEXT: sxtb w10, w12 +; CHECK-GI-NEXT: sxtb w12, w18 +; CHECK-GI-NEXT: mov v4.h[5], w9 +; CHECK-GI-NEXT: sxtb w9, w19 +; CHECK-GI-NEXT: mov v5.h[5], w14 +; 
CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: mov v16.h[5], w15 +; CHECK-GI-NEXT: mov v3.h[6], w11 +; CHECK-GI-NEXT: sxtb w11, w22 +; CHECK-GI-NEXT: mov v6.h[6], w9 +; CHECK-GI-NEXT: sxtb w9, w13 +; CHECK-GI-NEXT: sxtb w13, w24 ; CHECK-GI-NEXT: mov v0.s[2], wzr -; CHECK-GI-NEXT: mov v20.s[2], w24 -; CHECK-GI-NEXT: ldp x24, x23, [sp, #16] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov v18.s[3], w6 -; CHECK-GI-NEXT: mov v21.s[2], wzr -; CHECK-GI-NEXT: mul v2.4s, v2.4s, v16.4s -; CHECK-GI-NEXT: mul v4.4s, v4.4s, v17.4s -; CHECK-GI-NEXT: mov v1.s[3], w8 -; CHECK-GI-NEXT: mov v6.s[3], w9 -; CHECK-GI-NEXT: mov v7.s[3], w12 -; CHECK-GI-NEXT: mov v19.s[3], w15 -; CHECK-GI-NEXT: mov v3.s[3], w17 -; CHECK-GI-NEXT: mov v20.s[3], w25 +; CHECK-GI-NEXT: mov v7.h[6], w11 +; CHECK-GI-NEXT: fmov w11, s17 +; CHECK-GI-NEXT: mov v4.h[6], w10 +; CHECK-GI-NEXT: sxtb w10, w7 +; CHECK-GI-NEXT: mov v5.h[6], w12 +; CHECK-GI-NEXT: mov v16.h[6], w13 +; CHECK-GI-NEXT: mov v3.h[7], w8 +; CHECK-GI-NEXT: sxtb w8, w6 +; CHECK-GI-NEXT: smov w12, v1.b[8] +; CHECK-GI-NEXT: mov v6.h[7], w10 +; CHECK-GI-NEXT: sxtb w10, w17 +; CHECK-GI-NEXT: sxtb w11, w11 +; CHECK-GI-NEXT: mov v4.h[7], w9 +; CHECK-GI-NEXT: mov v7.h[7], w8 +; CHECK-GI-NEXT: smov w8, v2.b[8] +; CHECK-GI-NEXT: mov v5.h[7], w10 +; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v16.h[7], w11 ; CHECK-GI-NEXT: mov v0.s[3], wzr -; CHECK-GI-NEXT: mul v5.4s, v5.4s, v18.4s -; CHECK-GI-NEXT: mov v21.s[3], wzr -; CHECK-GI-NEXT: mla v2.4s, v1.4s, v19.4s -; CHECK-GI-NEXT: mla v4.4s, v6.4s, v3.4s -; CHECK-GI-NEXT: mla v5.4s, v7.4s, v20.4s -; CHECK-GI-NEXT: add v0.4s, v21.4s, v0.4s -; CHECK-GI-NEXT: add v1.4s, v2.4s, v4.4s -; CHECK-GI-NEXT: add v0.4s, v5.4s, v0.4s +; CHECK-GI-NEXT: mul v3.8h, v3.8h, v6.8h +; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: mul v2.8h, v4.8h, v7.8h +; CHECK-GI-NEXT: mul w16, w12, w8 +; CHECK-GI-NEXT: mul v1.8h, v5.8h, v16.8h +; CHECK-GI-NEXT: smov w17, v3.h[0] +; CHECK-GI-NEXT: smov w0, v3.h[4] +; CHECK-GI-NEXT: sxth w16, w16 +; CHECK-GI-NEXT: smov w2, v2.h[0] +; CHECK-GI-NEXT: smov w4, v2.h[4] +; CHECK-GI-NEXT: smov w18, v3.h[1] +; CHECK-GI-NEXT: smov w1, v3.h[5] +; CHECK-GI-NEXT: smov w3, v2.h[1] +; CHECK-GI-NEXT: smov w5, v2.h[5] +; CHECK-GI-NEXT: smov w6, v1.h[0] +; CHECK-GI-NEXT: smov w19, v1.h[4] +; CHECK-GI-NEXT: smov w7, v1.h[1] +; CHECK-GI-NEXT: smov w20, v1.h[5] +; CHECK-GI-NEXT: smov w10, v3.h[2] +; CHECK-GI-NEXT: smov w8, v3.h[3] +; CHECK-GI-NEXT: smov w11, v3.h[6] +; CHECK-GI-NEXT: smov w9, v3.h[7] +; CHECK-GI-NEXT: fmov s3, w17 +; CHECK-GI-NEXT: fmov s4, w0 +; CHECK-GI-NEXT: fmov s5, w2 +; CHECK-GI-NEXT: fmov s6, w4 +; CHECK-GI-NEXT: fmov s7, w6 +; CHECK-GI-NEXT: fmov s16, w19 +; CHECK-GI-NEXT: fmov s17, w16 +; CHECK-GI-NEXT: smov w12, v2.h[2] +; CHECK-GI-NEXT: smov w13, v2.h[6] +; CHECK-GI-NEXT: smov w14, v1.h[2] +; CHECK-GI-NEXT: smov w15, v1.h[6] +; CHECK-GI-NEXT: mov v3.s[1], w18 +; CHECK-GI-NEXT: mov v4.s[1], w1 +; CHECK-GI-NEXT: mov v5.s[1], w3 +; CHECK-GI-NEXT: mov v6.s[1], w5 +; CHECK-GI-NEXT: mov v7.s[1], w7 +; CHECK-GI-NEXT: mov v16.s[1], w20 +; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v17.s[1], wzr +; CHECK-GI-NEXT: smov w16, v2.h[3] +; CHECK-GI-NEXT: smov w17, v2.h[7] +; CHECK-GI-NEXT: smov w18, v1.h[3] +; CHECK-GI-NEXT: smov w0, v1.h[7] +; CHECK-GI-NEXT: mov v3.s[2], w10 +; CHECK-GI-NEXT: mov v4.s[2], w11 +; CHECK-GI-NEXT: mov v5.s[2], w12 +; CHECK-GI-NEXT: mov v6.s[2], w13 +; CHECK-GI-NEXT: mov v7.s[2], 
w14 +; CHECK-GI-NEXT: mov v16.s[2], w15 +; CHECK-GI-NEXT: mov v17.s[2], wzr +; CHECK-GI-NEXT: mov v3.s[3], w8 +; CHECK-GI-NEXT: mov v4.s[3], w9 +; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload +; CHECK-GI-NEXT: mov v5.s[3], w16 +; CHECK-GI-NEXT: mov v6.s[3], w17 +; CHECK-GI-NEXT: mov v7.s[3], w18 +; CHECK-GI-NEXT: mov v16.s[3], w0 +; CHECK-GI-NEXT: mov v17.s[3], wzr +; CHECK-GI-NEXT: add v1.4s, v3.4s, v4.4s +; CHECK-GI-NEXT: add v2.4s, v5.4s, v6.4s +; CHECK-GI-NEXT: add v3.4s, v7.4s, v16.4s +; CHECK-GI-NEXT: add v0.4s, v17.4s, v0.4s +; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s +; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s ; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s ; CHECK-GI-NEXT: addv s0, v0.4s ; CHECK-GI-NEXT: fmov w8, s0 -; CHECK-GI-NEXT: add w0, w8, w2 -; CHECK-GI-NEXT: ldp x26, x25, [sp], #64 // 16-byte Folded Reload +; CHECK-GI-NEXT: add w0, w8, w9 +; CHECK-GI-NEXT: add sp, sp, #112 ; CHECK-GI-NEXT: ret entry: %0 = load <25 x i8>, ptr %a @@ -2948,349 +3331,535 @@ define i32 @test_sdot_v25i8_double(<25 x i8> %a, <25 x i8> %b, <25 x i8> %c, <25 ; ; CHECK-GI-LABEL: test_sdot_v25i8_double: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: stp d11, d10, [sp, #-48]! // 16-byte Folded Spill -; CHECK-GI-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill -; CHECK-GI-NEXT: str x29, [sp, #32] // 8-byte Folded Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 48 +; CHECK-GI-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 16 ; CHECK-GI-NEXT: .cfi_offset w29, -16 -; CHECK-GI-NEXT: .cfi_offset b8, -24 -; CHECK-GI-NEXT: .cfi_offset b9, -32 -; CHECK-GI-NEXT: .cfi_offset b10, -40 -; CHECK-GI-NEXT: .cfi_offset b11, -48 -; CHECK-GI-NEXT: sxtb w8, w0 -; CHECK-GI-NEXT: sxtb w10, w4 -; CHECK-GI-NEXT: sxtb w9, w1 -; CHECK-GI-NEXT: sxtb w11, w2 -; CHECK-GI-NEXT: sxtb w13, w6 -; CHECK-GI-NEXT: ldr w12, [sp, #72] +; CHECK-GI-NEXT: lsl w8, w0, #8 +; CHECK-GI-NEXT: ldr w9, [sp, #16] +; CHECK-GI-NEXT: lsl w10, w1, #8 +; CHECK-GI-NEXT: ldr w11, [sp, #24] +; CHECK-GI-NEXT: lsl w12, w4, #8 +; CHECK-GI-NEXT: ldr w13, [sp, #56] +; CHECK-GI-NEXT: sbfx w8, w8, #8, #8 +; CHECK-GI-NEXT: lsl w9, w9, #8 +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: ldr w14, [sp, #64] ; CHECK-GI-NEXT: fmov s2, w8 -; CHECK-GI-NEXT: ldr w8, [sp, #48] -; CHECK-GI-NEXT: fmov s4, w10 -; CHECK-GI-NEXT: ldr w10, [sp, #80] -; CHECK-GI-NEXT: ldr w14, [sp, #128] -; CHECK-GI-NEXT: ldr w15, [sp, #152] -; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: lsl w8, w11, #8 +; CHECK-GI-NEXT: lsl w11, w2, #8 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: fmov s4, w9 +; CHECK-GI-NEXT: sbfx w8, w8, #8, #8 +; CHECK-GI-NEXT: ldr w16, [sp, #112] +; CHECK-GI-NEXT: mov v2.h[1], w10 +; CHECK-GI-NEXT: ldr w10, [sp, #32] +; CHECK-GI-NEXT: sbfx w9, w11, #8, #8 +; CHECK-GI-NEXT: lsl w11, w3, #8 +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 ; CHECK-GI-NEXT: fmov s1, wzr +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: mov v4.h[1], w8 +; CHECK-GI-NEXT: ldr w8, [sp, #152] +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 ; CHECK-GI-NEXT: fmov s0, wzr -; CHECK-GI-NEXT: mov v2.s[1], w9 -; CHECK-GI-NEXT: sxtb w9, w5 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: fmov s3, w8 -; CHECK-GI-NEXT: ldr w8, [sp, #88] -; CHECK-GI-NEXT: ldr x29, [sp, #32] // 8-byte Folded Reload -; CHECK-GI-NEXT: mov v4.s[1], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #56] -; CHECK-GI-NEXT: fmov s5, w10 -; CHECK-GI-NEXT: 
sxtb w8, w8 -; CHECK-GI-NEXT: sxtb w10, w3 +; CHECK-GI-NEXT: mov v2.h[2], w9 +; CHECK-GI-NEXT: ldr w9, [sp, #40] +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: lsl w8, w8, #8 ; CHECK-GI-NEXT: mov v1.s[1], wzr -; CHECK-GI-NEXT: mov v2.s[2], w11 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: ldr w11, [sp, #64] -; CHECK-GI-NEXT: mov v5.s[1], w8 -; CHECK-GI-NEXT: ldr w8, [sp, #104] +; CHECK-GI-NEXT: mov v4.h[2], w10 +; CHECK-GI-NEXT: lsl w9, w9, #8 +; CHECK-GI-NEXT: ldr w10, [sp, #160] +; CHECK-GI-NEXT: sbfx w8, w8, #8, #8 ; CHECK-GI-NEXT: mov v0.s[1], wzr -; CHECK-GI-NEXT: mov v3.s[1], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #96] -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: mov v4.s[2], w13 -; CHECK-GI-NEXT: ldr w13, [sp, #120] -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: mov v2.s[3], w10 -; CHECK-GI-NEXT: ldr w10, [sp, #112] -; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: mov v2.h[3], w11 +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: ldr w11, [sp, #48] +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: fmov s3, w8 ; CHECK-GI-NEXT: mov v1.s[2], wzr +; CHECK-GI-NEXT: mov v4.h[3], w9 +; CHECK-GI-NEXT: ldr w9, [sp, #80] +; CHECK-GI-NEXT: lsl w8, w11, #8 +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: ldr w11, [sp, #168] ; CHECK-GI-NEXT: mov v0.s[2], wzr -; CHECK-GI-NEXT: mov v3.s[2], w11 -; CHECK-GI-NEXT: sxtb w11, w10 -; CHECK-GI-NEXT: mov v5.s[2], w9 -; CHECK-GI-NEXT: sxtb w9, w13 -; CHECK-GI-NEXT: ldr w13, [sp, #144] -; CHECK-GI-NEXT: ldr w10, [sp, #136] -; CHECK-GI-NEXT: fmov s6, w11 -; CHECK-GI-NEXT: sxtb w11, w7 -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: sxtb w10, w10 +; CHECK-GI-NEXT: mov v2.h[4], w12 +; CHECK-GI-NEXT: lsl w12, w5, #8 +; CHECK-GI-NEXT: lsl w9, w9, #8 +; CHECK-GI-NEXT: sbfx w8, w8, #8, #8 +; CHECK-GI-NEXT: mov v3.h[1], w10 +; CHECK-GI-NEXT: ldr w10, [sp, #88] +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: mov v4.h[4], w8 +; CHECK-GI-NEXT: lsl w8, w10, #8 +; CHECK-GI-NEXT: ldr w10, [sp, #176] +; CHECK-GI-NEXT: mov v2.h[5], w12 +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: lsl w12, w6, #8 +; CHECK-GI-NEXT: fmov s6, w9 +; CHECK-GI-NEXT: sbfx w15, w8, #8, #8 +; CHECK-GI-NEXT: lsl w9, w10, #8 +; CHECK-GI-NEXT: mov v3.h[2], w11 +; CHECK-GI-NEXT: sbfx w11, w12, #8, #8 +; CHECK-GI-NEXT: ldr w10, [sp, #96] +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: mov v4.h[5], w13 +; CHECK-GI-NEXT: ldr w13, [sp, #224] +; CHECK-GI-NEXT: mov v6.h[1], w15 +; CHECK-GI-NEXT: mov v2.h[6], w11 +; CHECK-GI-NEXT: lsl w15, w7, #8 +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: ldr w11, [sp, #184] +; CHECK-GI-NEXT: ldr w12, [sp, #104] +; CHECK-GI-NEXT: mov v3.h[3], w9 +; CHECK-GI-NEXT: ldr w9, [sp, #216] +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: mov v2.h[7], w15 +; CHECK-GI-NEXT: lsl w15, w9, #8 +; CHECK-GI-NEXT: mov v4.h[6], w14 +; CHECK-GI-NEXT: mov v6.h[2], w10 +; CHECK-GI-NEXT: lsl w10, w13, #8 +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: sbfx w13, w15, #8, #8 +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: ldr w14, [sp, #288] +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: mov v3.h[4], w11 +; CHECK-GI-NEXT: ldr w11, [sp, #192] +; CHECK-GI-NEXT: fmov s5, w13 +; CHECK-GI-NEXT: ldr w13, [sp, #232] +; CHECK-GI-NEXT: ldr w9, [sp, #120] +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: mov 
v6.h[3], w12 +; CHECK-GI-NEXT: ldr w8, [sp, #72] +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: lsl w9, w9, #8 ; CHECK-GI-NEXT: mov v1.s[3], wzr -; CHECK-GI-NEXT: mov v5.s[3], w8 -; CHECK-GI-NEXT: ldr w8, [sp, #184] -; CHECK-GI-NEXT: mov v4.s[3], w11 -; CHECK-GI-NEXT: mov v6.s[1], w9 -; CHECK-GI-NEXT: fmov s7, w13 -; CHECK-GI-NEXT: ldr w13, [sp, #216] -; CHECK-GI-NEXT: sxtb w9, w12 -; CHECK-GI-NEXT: sxtb w12, w14 -; CHECK-GI-NEXT: sxtb w14, w15 -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: ldr w11, [sp, #160] -; CHECK-GI-NEXT: mov v7.s[1], w14 -; CHECK-GI-NEXT: ldr w14, [sp, #224] -; CHECK-GI-NEXT: mov v3.s[3], w9 -; CHECK-GI-NEXT: mov v6.s[2], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #192] -; CHECK-GI-NEXT: fmov s16, w8 -; CHECK-GI-NEXT: fmov s18, w13 -; CHECK-GI-NEXT: sxtb w14, w14 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: ldr w9, [sp, #168] -; CHECK-GI-NEXT: ldr w13, [sp, #208] -; CHECK-GI-NEXT: mov v7.s[2], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #256] -; CHECK-GI-NEXT: ldr w8, [sp, #176] -; CHECK-GI-NEXT: mov v16.s[1], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #200] -; CHECK-GI-NEXT: mov v18.s[1], w14 -; CHECK-GI-NEXT: ldr w14, [sp, #232] -; CHECK-GI-NEXT: mov v6.s[3], w10 -; CHECK-GI-NEXT: ldr w10, [sp, #248] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: sxtb w14, w14 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: mov v16.s[2], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #240] -; CHECK-GI-NEXT: mov v7.s[3], w9 -; CHECK-GI-NEXT: mov v18.s[2], w14 -; CHECK-GI-NEXT: fmov s17, w10 +; CHECK-GI-NEXT: mov v5.h[1], w10 +; CHECK-GI-NEXT: ldr w10, [sp, #280] +; CHECK-GI-NEXT: sbfx w15, w11, #8, #8 +; CHECK-GI-NEXT: sbfx w12, w13, #8, #8 +; CHECK-GI-NEXT: lsl w13, w14, #8 +; CHECK-GI-NEXT: ldr w14, [sp, #240] +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: mov v3.h[5], w15 +; CHECK-GI-NEXT: lsl w15, w16, #8 +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: mov v5.h[2], w12 +; CHECK-GI-NEXT: ldr w12, [sp, #296] +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: lsl w8, w8, #8 +; CHECK-GI-NEXT: fmov s7, w10 +; CHECK-GI-NEXT: ldr w10, [sp, #200] +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: mov v6.h[4], w15 +; CHECK-GI-NEXT: ldr w15, [sp, #304] +; CHECK-GI-NEXT: ldr w11, [sp, #128] +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: mov v5.h[3], w14 +; CHECK-GI-NEXT: ldr w14, [sp, #208] +; CHECK-GI-NEXT: mov v7.h[1], w13 +; CHECK-GI-NEXT: ldr w13, [sp, #248] +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: lsl w15, w15, #8 +; CHECK-GI-NEXT: sbfx w8, w8, #8, #8 +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: mov v6.h[5], w9 +; CHECK-GI-NEXT: ldr w9, [sp, #272] +; CHECK-GI-NEXT: mov v3.h[6], w10 +; CHECK-GI-NEXT: lsl w10, w14, #8 +; CHECK-GI-NEXT: sbfx w14, w15, #8, #8 +; CHECK-GI-NEXT: mov v7.h[2], w12 +; CHECK-GI-NEXT: ldr w12, [sp, #256] +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: ldr w15, [sp, #320] +; CHECK-GI-NEXT: lsl w9, w9, #8 +; CHECK-GI-NEXT: mov v5.h[4], w13 +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: ldr w13, [sp, #312] +; CHECK-GI-NEXT: mov v3.h[7], w10 +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: mov v4.h[7], w8 +; CHECK-GI-NEXT: 
mov v7.h[3], w14 ; CHECK-GI-NEXT: ldr w14, [sp, #264] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: ldr w9, [sp, #288] -; CHECK-GI-NEXT: ldr w10, [sp, #272] -; CHECK-GI-NEXT: sxtb w14, w14 -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: ldr w15, [sp, #392] -; CHECK-GI-NEXT: mov v17.s[1], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #280] -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: mov v18.s[3], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #312] -; CHECK-GI-NEXT: mov v16.s[3], w13 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: ldr w13, [sp, #296] +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: sbfx w8, w9, #8, #8 +; CHECK-GI-NEXT: ldr w16, [sp, #136] +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: mov v5.h[5], w12 +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: mul v16.8h, v2.8h, v3.8h +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: lsl w16, w16, #8 +; CHECK-GI-NEXT: sbfx w12, w14, #8, #8 +; CHECK-GI-NEXT: lsl w14, w15, #8 +; CHECK-GI-NEXT: mov v6.h[6], w11 +; CHECK-GI-NEXT: mov v7.h[4], w13 +; CHECK-GI-NEXT: ldr w13, [sp, #328] +; CHECK-GI-NEXT: ldr w10, [sp, #144] +; CHECK-GI-NEXT: mov v5.h[6], w12 +; CHECK-GI-NEXT: ldr w12, [sp, #336] +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; CHECK-GI-NEXT: smov w9, v16.h[0] +; CHECK-GI-NEXT: smov w15, v16.h[4] +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: smov w17, v16.h[5] ; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: sxtb w12, w12 ; CHECK-GI-NEXT: mov v0.s[3], wzr -; CHECK-GI-NEXT: mov v17.s[2], w14 -; CHECK-GI-NEXT: ldr w14, [sp, #320] -; CHECK-GI-NEXT: fmov s20, w11 -; CHECK-GI-NEXT: ldr w11, [sp, #344] -; CHECK-GI-NEXT: fmov s19, w12 -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: sxtb w14, w14 -; CHECK-GI-NEXT: ldr w12, [sp, #304] -; CHECK-GI-NEXT: mul v4.4s, v4.4s, v18.4s -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: mov v20.s[1], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #352] -; CHECK-GI-NEXT: mov v19.s[1], w14 -; CHECK-GI-NEXT: ldr w14, [sp, #328] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: fmov s21, w11 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: ldr w11, [sp, #336] +; CHECK-GI-NEXT: sbfx w11, w13, #8, #8 +; CHECK-GI-NEXT: smov w13, v16.h[1] +; CHECK-GI-NEXT: mov v7.h[5], w14 +; CHECK-GI-NEXT: mov v5.h[7], w8 +; CHECK-GI-NEXT: ldr w14, [sp, #344] +; CHECK-GI-NEXT: ldr w8, [sp, #352] +; CHECK-GI-NEXT: fmov s2, w9 +; CHECK-GI-NEXT: fmov s3, w15 +; CHECK-GI-NEXT: lsl w9, w12, #8 +; CHECK-GI-NEXT: sbfx w12, w16, #8, #8 ; CHECK-GI-NEXT: sxtb w14, w14 -; CHECK-GI-NEXT: mov v17.s[3], w10 -; CHECK-GI-NEXT: ldr w10, [sp, #376] -; CHECK-GI-NEXT: mov v20.s[2], w13 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: ldr w13, [sp, #368] -; CHECK-GI-NEXT: mov v21.s[1], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #360] -; CHECK-GI-NEXT: mov v19.s[2], w14 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: ldr w14, [sp, #384] -; CHECK-GI-NEXT: mla v4.4s, v2.4s, v16.4s -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: mov v20.s[3], w12 -; CHECK-GI-NEXT: sxtb w12, w13 -; CHECK-GI-NEXT: mul w10, w8, w10 -; CHECK-GI-NEXT: mov v21.s[2], w9 -; CHECK-GI-NEXT: mov v19.s[3], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #416] -; CHECK-GI-NEXT: sxtb w13, w14 -; CHECK-GI-NEXT: sxtb w14, w15 -; CHECK-GI-NEXT: ldr w9, [sp, #400] -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: fmov s22, w10 -; CHECK-GI-NEXT: ldr w10, [sp, #432] -; CHECK-GI-NEXT: fmov s23, w13 -; CHECK-GI-NEXT: ldr w13, [sp, #448] -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: mov v21.s[3], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #424] -; 
CHECK-GI-NEXT: fmov s25, w11 -; CHECK-GI-NEXT: ldr w11, [sp, #480] -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: mov v23.s[1], w14 -; CHECK-GI-NEXT: ldr w14, [sp, #456] -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: fmov s24, w13 -; CHECK-GI-NEXT: ldr w13, [sp, #440] -; CHECK-GI-NEXT: mov v25.s[1], w12 +; CHECK-GI-NEXT: lsl w8, w8, #8 +; CHECK-GI-NEXT: mov v7.h[6], w11 +; CHECK-GI-NEXT: ldr w11, [sp, #360] +; CHECK-GI-NEXT: smov w15, v16.h[3] +; CHECK-GI-NEXT: mov v2.s[1], w13 +; CHECK-GI-NEXT: smov w13, v16.h[2] +; CHECK-GI-NEXT: mov v6.h[7], w12 +; CHECK-GI-NEXT: smov w12, v16.h[6] +; CHECK-GI-NEXT: mov v3.s[1], w17 +; CHECK-GI-NEXT: mul v18.8h, v4.8h, v5.8h +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: sbfx w16, w9, #8, #8 +; CHECK-GI-NEXT: ldr w9, [sp, #368] +; CHECK-GI-NEXT: mov v2.s[2], w13 +; CHECK-GI-NEXT: smov w13, v16.h[7] +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: mov v3.s[2], w12 +; CHECK-GI-NEXT: sbfx w12, w8, #8, #8 +; CHECK-GI-NEXT: mul w8, w10, w14 +; CHECK-GI-NEXT: smov w10, v18.h[0] +; CHECK-GI-NEXT: lsl w9, w9, #8 +; CHECK-GI-NEXT: ldr w14, [sp, #376] +; CHECK-GI-NEXT: fmov s16, w12 +; CHECK-GI-NEXT: smov w12, v18.h[1] +; CHECK-GI-NEXT: mov v7.h[7], w16 +; CHECK-GI-NEXT: mov v2.s[3], w15 +; CHECK-GI-NEXT: smov w15, v18.h[4] +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: mov v3.s[3], w13 +; CHECK-GI-NEXT: ldr w13, [sp, #416] +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: fmov s4, w10 +; CHECK-GI-NEXT: mov v16.h[1], w11 +; CHECK-GI-NEXT: ldr w10, [sp, #424] +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: ldr w11, [sp, #384] +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; CHECK-GI-NEXT: fmov s5, w15 +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: ldr w15, [sp, #432] +; CHECK-GI-NEXT: mov v4.s[1], w12 +; CHECK-GI-NEXT: smov w12, v18.h[5] +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: mov v16.h[2], w9 +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: lsl w15, w15, #8 +; CHECK-GI-NEXT: fmov s17, w13 +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: mul v7.8h, v6.8h, v7.8h +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: ldr w9, [sp, #392] +; CHECK-GI-NEXT: ldr w13, [sp, #400] +; CHECK-GI-NEXT: mov v5.s[1], w12 +; CHECK-GI-NEXT: smov w12, v18.h[2] +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: mov v17.h[1], w10 +; CHECK-GI-NEXT: mov v16.h[3], w14 +; CHECK-GI-NEXT: ldr w10, [sp, #440] +; CHECK-GI-NEXT: smov w14, v18.h[6] +; CHECK-GI-NEXT: lsl w9, w9, #8 +; CHECK-GI-NEXT: ldr w16, [sp, #456] +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: sxth w8, w8 +; CHECK-GI-NEXT: add v2.4s, v2.4s, v3.4s +; CHECK-GI-NEXT: mov v4.s[2], w12 +; CHECK-GI-NEXT: smov w12, v18.h[3] +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: mov v17.h[2], w15 +; CHECK-GI-NEXT: mov v16.h[4], w11 +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: mov v5.s[2], w14 +; CHECK-GI-NEXT: smov w14, v18.h[7] +; CHECK-GI-NEXT: ldr w15, [sp, #448] +; CHECK-GI-NEXT: ldr w11, [sp, #408] +; CHECK-GI-NEXT: mov v4.s[3], w12 +; CHECK-GI-NEXT: smov w12, v7.h[0] +; CHECK-GI-NEXT: mov v17.h[3], w10 +; CHECK-GI-NEXT: ldr w10, [sp, #480] +; CHECK-GI-NEXT: mov v16.h[5], w9 +; CHECK-GI-NEXT: lsl w9, w13, #8 +; CHECK-GI-NEXT: lsl w13, w15, #8 +; CHECK-GI-NEXT: mov v5.s[3], w14 +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: smov w14, v7.h[1] +; CHECK-GI-NEXT: lsl w15, w16, #8 +; CHECK-GI-NEXT: fmov s6, w12 ; CHECK-GI-NEXT: ldr w12, [sp, #488] -; CHECK-GI-NEXT: 
sxtb w14, w14 -; CHECK-GI-NEXT: fmov s26, w11 -; CHECK-GI-NEXT: ldr w15, [sp, #504] -; CHECK-GI-NEXT: ldr w11, [sp, #472] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: mov v24.s[1], w14 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: mov v17.h[4], w13 +; CHECK-GI-NEXT: ldr w13, [sp, #496] +; CHECK-GI-NEXT: fmov s18, w10 +; CHECK-GI-NEXT: ldr w10, [sp, #552] +; CHECK-GI-NEXT: mov v6.s[1], w14 +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 ; CHECK-GI-NEXT: ldr w14, [sp, #464] -; CHECK-GI-NEXT: mov v23.s[2], w9 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: ldr w8, [sp, #408] -; CHECK-GI-NEXT: mov v26.s[1], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #496] -; CHECK-GI-NEXT: mov v25.s[2], w10 +; CHECK-GI-NEXT: mov v16.h[6], w9 +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: mov v18.h[1], w12 +; CHECK-GI-NEXT: ldr w12, [sp, #560] +; CHECK-GI-NEXT: mov v17.h[5], w15 +; CHECK-GI-NEXT: sbfx w15, w10, #8, #8 +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: lsl w12, w12, #8 ; CHECK-GI-NEXT: ldr w10, [sp, #512] -; CHECK-GI-NEXT: sxtb w9, w14 -; CHECK-GI-NEXT: ldr w14, [sp, #520] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: mov v22.s[1], wzr -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: mov v24.s[2], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #528] -; CHECK-GI-NEXT: mov v26.s[2], w12 -; CHECK-GI-NEXT: sxtb w12, w13 -; CHECK-GI-NEXT: sxtb w13, w15 -; CHECK-GI-NEXT: fmov s27, w10 -; CHECK-GI-NEXT: ldr w10, [sp, #584] -; CHECK-GI-NEXT: ldr w15, [sp, #552] -; CHECK-GI-NEXT: mov v25.s[3], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #544] -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: mov v24.s[3], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #560] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: mov v26.s[3], w13 -; CHECK-GI-NEXT: sxtb w13, w14 -; CHECK-GI-NEXT: sxtb w14, w15 -; CHECK-GI-NEXT: fmov s29, w10 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: fmov s28, w12 -; CHECK-GI-NEXT: ldr w12, [sp, #616] -; CHECK-GI-NEXT: mov v27.s[1], w13 -; CHECK-GI-NEXT: ldr w13, [sp, #592] -; CHECK-GI-NEXT: ldr w15, [sp, #568] -; CHECK-GI-NEXT: mov v23.s[3], w8 -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: ldr w8, [sp, #536] -; CHECK-GI-NEXT: ldr w10, [sp, #576] -; CHECK-GI-NEXT: mov v28.s[1], w14 -; CHECK-GI-NEXT: ldr w14, [sp, #624] -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: fmov s30, w12 -; CHECK-GI-NEXT: ldr w12, [sp, #600] -; CHECK-GI-NEXT: mov v27.s[2], w9 -; CHECK-GI-NEXT: mov v29.s[1], w13 -; CHECK-GI-NEXT: sxtb w13, w14 -; CHECK-GI-NEXT: sxtb w14, w15 -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: ldr w9, [sp, #608] -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: mov v30.s[1], w13 -; CHECK-GI-NEXT: ldr w13, [sp, #632] -; CHECK-GI-NEXT: mov v28.s[2], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #640] -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: mov v29.s[2], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #648] -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: mov v27.s[3], w8 -; CHECK-GI-NEXT: ldr w8, [sp, #664] -; CHECK-GI-NEXT: mov v30.s[2], w13 -; CHECK-GI-NEXT: mov v28.s[3], w14 +; CHECK-GI-NEXT: fmov s19, w15 +; CHECK-GI-NEXT: ldr w15, [sp, #616] +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; 
CHECK-GI-NEXT: mov v16.h[7], w11 +; CHECK-GI-NEXT: ldr w11, [sp, #504] +; CHECK-GI-NEXT: mov v18.h[2], w13 +; CHECK-GI-NEXT: ldr w13, [sp, #568] +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: mov v17.h[6], w14 +; CHECK-GI-NEXT: lsl w14, w15, #8 +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: ldr w15, [sp, #576] +; CHECK-GI-NEXT: mov v19.h[1], w12 +; CHECK-GI-NEXT: ldr w12, [sp, #624] +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; CHECK-GI-NEXT: sbfx w16, w11, #8, #8 +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: lsl w15, w15, #8 +; CHECK-GI-NEXT: fmov s20, w14 ; CHECK-GI-NEXT: ldr w14, [sp, #680] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: ldr w13, [sp, #656] -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: sxtb w14, w14 -; CHECK-GI-NEXT: mov v29.s[3], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #688] -; CHECK-GI-NEXT: fmov s31, w12 -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: ldr w12, [sp, #752] -; CHECK-GI-NEXT: mov v30.s[3], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #744] -; CHECK-GI-NEXT: fmov s8, w14 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: ldr w14, [sp, #712] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: mov v31.s[1], w13 +; CHECK-GI-NEXT: mov v18.h[3], w16 +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: mov v19.h[2], w13 +; CHECK-GI-NEXT: ldr w13, [sp, #632] +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: mov v20.h[1], w12 +; CHECK-GI-NEXT: ldr w12, [sp, #688] +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; CHECK-GI-NEXT: mov v18.h[4], w10 +; CHECK-GI-NEXT: ldr w10, [sp, #584] +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: mov v19.h[3], w15 +; CHECK-GI-NEXT: fmov s21, w14 +; CHECK-GI-NEXT: ldr w15, [sp, #640] +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: mov v20.h[2], w13 ; CHECK-GI-NEXT: ldr w13, [sp, #696] -; CHECK-GI-NEXT: mov v8.s[1], w9 -; CHECK-GI-NEXT: sxtb w14, w14 -; CHECK-GI-NEXT: ldr w9, [sp, #720] -; CHECK-GI-NEXT: fmov s9, w11 -; CHECK-GI-NEXT: ldr w11, [sp, #776] -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: fmov s10, w14 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: mov v22.s[2], wzr -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: mov v31.s[2], w8 -; CHECK-GI-NEXT: ldr w8, [sp, #704] -; CHECK-GI-NEXT: mov v9.s[1], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #760] -; CHECK-GI-NEXT: mov v8.s[2], w13 -; CHECK-GI-NEXT: mul w10, w10, w11 -; CHECK-GI-NEXT: mov v10.s[1], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #728] -; CHECK-GI-NEXT: sxtb w11, w12 -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: mul v5.4s, v5.4s, v20.4s -; CHECK-GI-NEXT: mul v7.4s, v7.4s, v21.4s -; CHECK-GI-NEXT: mul v18.4s, v25.4s, v30.4s -; CHECK-GI-NEXT: mov v22.s[3], wzr -; CHECK-GI-NEXT: fmov s11, w10 -; CHECK-GI-NEXT: mov v9.s[2], w11 -; CHECK-GI-NEXT: ldr w10, [sp, #768] -; CHECK-GI-NEXT: mov v8.s[3], w8 -; CHECK-GI-NEXT: sxtb w8, w9 -; CHECK-GI-NEXT: ldr w9, [sp, #672] +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: ldr w11, [sp, #520] +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: lsl w15, w15, #8 +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: mov v21.h[1], w12 +; CHECK-GI-NEXT: ldr w12, [sp, #592] +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: mov v19.h[4], w10 +; CHECK-GI-NEXT: ldr w10, [sp, #704] +; CHECK-GI-NEXT: lsl w11, w11, 
#8 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: mov v20.h[3], w15 +; CHECK-GI-NEXT: ldr w15, [sp, #648] +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: mov v21.h[2], w13 +; CHECK-GI-NEXT: ldr w13, [sp, #600] +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: lsl w15, w15, #8 +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: mov v18.h[5], w11 +; CHECK-GI-NEXT: ldr w11, [sp, #712] +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: mov v19.h[5], w12 +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: ldr w12, [sp, #656] +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: mov v21.h[3], w10 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: ldr w10, [sp, #608] +; CHECK-GI-NEXT: mov v20.h[4], w15 +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: ldr w14, [sp, #528] +; CHECK-GI-NEXT: ldr w15, [sp, #664] +; CHECK-GI-NEXT: mov v19.h[6], w13 +; CHECK-GI-NEXT: ldr w13, [sp, #720] +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: mov v21.h[4], w11 +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: sbfx w16, w10, #8, #8 +; CHECK-GI-NEXT: lsl w15, w15, #8 +; CHECK-GI-NEXT: mov v20.h[5], w12 +; CHECK-GI-NEXT: ldr w12, [sp, #728] +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: mov v19.h[7], w16 +; CHECK-GI-NEXT: ldr w9, [sp, #472] +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: mov v18.h[6], w14 +; CHECK-GI-NEXT: sbfx w14, w15, #8, #8 +; CHECK-GI-NEXT: mov v21.h[5], w13 +; CHECK-GI-NEXT: ldr w15, [sp, #672] +; CHECK-GI-NEXT: ldr w11, [sp, #536] +; CHECK-GI-NEXT: ldr w13, [sp, #736] +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: lsl w9, w9, #8 +; CHECK-GI-NEXT: mov v20.h[6], w14 +; CHECK-GI-NEXT: lsl w15, w15, #8 +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: mul v19.8h, v16.8h, v19.8h +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: mov v21.h[6], w12 +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: smov w14, v7.h[2] +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: smov w12, v7.h[4] +; CHECK-GI-NEXT: mov v17.h[7], w9 +; CHECK-GI-NEXT: mov v20.h[7], w15 +; CHECK-GI-NEXT: smov w9, v7.h[5] +; CHECK-GI-NEXT: mov v18.h[7], w11 +; CHECK-GI-NEXT: smov w11, v19.h[4] +; CHECK-GI-NEXT: ldr w15, [sp, #744] +; CHECK-GI-NEXT: mov v21.h[7], w13 +; CHECK-GI-NEXT: mov v6.s[2], w14 +; CHECK-GI-NEXT: smov w14, v19.h[0] +; CHECK-GI-NEXT: fmov s16, w12 +; CHECK-GI-NEXT: smov w13, v19.h[5] +; CHECK-GI-NEXT: smov w12, v19.h[1] +; CHECK-GI-NEXT: mul v20.8h, v17.8h, v20.8h +; CHECK-GI-NEXT: ldr w10, [sp, #544] +; CHECK-GI-NEXT: add v3.4s, v4.4s, v5.4s +; CHECK-GI-NEXT: mul v22.8h, v18.8h, v21.8h +; CHECK-GI-NEXT: fmov s18, w11 +; CHECK-GI-NEXT: mov v16.s[1], w9 +; CHECK-GI-NEXT: fmov s17, w14 +; CHECK-GI-NEXT: smov w14, v7.h[6] +; CHECK-GI-NEXT: smov w11, v19.h[2] +; CHECK-GI-NEXT: smov w9, v7.h[3] ; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: mla v5.4s, v3.4s, v17.4s -; CHECK-GI-NEXT: mov v11.s[1], wzr -; CHECK-GI-NEXT: mov v10.s[2], w8 -; CHECK-GI-NEXT: ldr w8, [sp, #736] -; CHECK-GI-NEXT: mov v9.s[3], w10 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: mla v7.4s, v6.4s, v19.4s -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: mul v20.4s, v26.4s, v8.4s -; CHECK-GI-NEXT: mla v18.4s, v23.4s, v29.4s -; CHECK-GI-NEXT: 
mov v31.s[3], w9 -; CHECK-GI-NEXT: add v1.4s, v22.4s, v1.4s -; CHECK-GI-NEXT: add v2.4s, v4.4s, v5.4s -; CHECK-GI-NEXT: mov v11.s[2], wzr -; CHECK-GI-NEXT: mov v10.s[3], w8 -; CHECK-GI-NEXT: mul v21.4s, v28.4s, v9.4s -; CHECK-GI-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload -; CHECK-GI-NEXT: add v1.4s, v7.4s, v1.4s -; CHECK-GI-NEXT: mla v20.4s, v24.4s, v31.4s -; CHECK-GI-NEXT: mov v11.s[3], wzr -; CHECK-GI-NEXT: mla v21.4s, v27.4s, v10.4s +; CHECK-GI-NEXT: fmov s21, w8 +; CHECK-GI-NEXT: mov v18.s[1], w13 +; CHECK-GI-NEXT: sxtb w13, w15 +; CHECK-GI-NEXT: smov w15, v20.h[0] +; CHECK-GI-NEXT: mov v17.s[1], w12 +; CHECK-GI-NEXT: smov w8, v7.h[7] +; CHECK-GI-NEXT: smov w12, v19.h[6] +; CHECK-GI-NEXT: mov v16.s[2], w14 +; CHECK-GI-NEXT: smov w14, v20.h[1] +; CHECK-GI-NEXT: mul w10, w10, w13 +; CHECK-GI-NEXT: smov w13, v20.h[4] +; CHECK-GI-NEXT: smov w16, v20.h[5] +; CHECK-GI-NEXT: mov v21.s[1], wzr +; CHECK-GI-NEXT: fmov s7, w15 +; CHECK-GI-NEXT: smov w15, v20.h[2] +; CHECK-GI-NEXT: mov v6.s[3], w9 +; CHECK-GI-NEXT: mov v17.s[2], w11 +; CHECK-GI-NEXT: smov w11, v22.h[0] +; CHECK-GI-NEXT: sxth w10, w10 +; CHECK-GI-NEXT: mov v18.s[2], w12 +; CHECK-GI-NEXT: smov w12, v22.h[1] +; CHECK-GI-NEXT: mov v16.s[3], w8 +; CHECK-GI-NEXT: mov v7.s[1], w14 +; CHECK-GI-NEXT: smov w14, v22.h[4] +; CHECK-GI-NEXT: fmov s23, w13 +; CHECK-GI-NEXT: smov w13, v22.h[5] +; CHECK-GI-NEXT: fmov s26, w10 +; CHECK-GI-NEXT: smov w10, v19.h[7] +; CHECK-GI-NEXT: fmov s24, w11 +; CHECK-GI-NEXT: smov w11, v20.h[6] +; CHECK-GI-NEXT: mov v21.s[2], wzr +; CHECK-GI-NEXT: mov v23.s[1], w16 +; CHECK-GI-NEXT: add v4.4s, v6.4s, v16.4s +; CHECK-GI-NEXT: add v2.4s, v2.4s, v3.4s +; CHECK-GI-NEXT: fmov s25, w14 +; CHECK-GI-NEXT: smov w14, v22.h[2] +; CHECK-GI-NEXT: mov v26.s[1], wzr +; CHECK-GI-NEXT: mov v24.s[1], w12 +; CHECK-GI-NEXT: smov w12, v19.h[3] +; CHECK-GI-NEXT: mov v7.s[2], w15 +; CHECK-GI-NEXT: smov w15, v20.h[3] +; CHECK-GI-NEXT: mov v18.s[3], w10 +; CHECK-GI-NEXT: mov v21.s[3], wzr +; CHECK-GI-NEXT: mov v25.s[1], w13 +; CHECK-GI-NEXT: smov w13, v22.h[6] +; CHECK-GI-NEXT: mov v23.s[2], w11 +; CHECK-GI-NEXT: smov w11, v20.h[7] +; CHECK-GI-NEXT: mov v26.s[2], wzr +; CHECK-GI-NEXT: mov v24.s[2], w14 +; CHECK-GI-NEXT: smov w14, v22.h[3] +; CHECK-GI-NEXT: mov v17.s[3], w12 +; CHECK-GI-NEXT: mov v7.s[3], w15 +; CHECK-GI-NEXT: add v1.4s, v21.4s, v1.4s +; CHECK-GI-NEXT: mov v25.s[2], w13 +; CHECK-GI-NEXT: smov w13, v22.h[7] +; CHECK-GI-NEXT: mov v23.s[3], w11 +; CHECK-GI-NEXT: mov v26.s[3], wzr +; CHECK-GI-NEXT: mov v24.s[3], w14 +; CHECK-GI-NEXT: add v5.4s, v17.4s, v18.4s +; CHECK-GI-NEXT: add v1.4s, v4.4s, v1.4s +; CHECK-GI-NEXT: mov v25.s[3], w13 +; CHECK-GI-NEXT: add v6.4s, v7.4s, v23.4s +; CHECK-GI-NEXT: add v0.4s, v26.4s, v0.4s ; CHECK-GI-NEXT: add v1.4s, v2.4s, v1.4s -; CHECK-GI-NEXT: add v3.4s, v18.4s, v20.4s -; CHECK-GI-NEXT: add v0.4s, v11.4s, v0.4s +; CHECK-GI-NEXT: add v7.4s, v24.4s, v25.4s +; CHECK-GI-NEXT: add v3.4s, v5.4s, v6.4s ; CHECK-GI-NEXT: addv s1, v1.4s -; CHECK-GI-NEXT: add v0.4s, v21.4s, v0.4s +; CHECK-GI-NEXT: add v0.4s, v7.4s, v0.4s ; CHECK-GI-NEXT: fmov w8, s1 ; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s ; CHECK-GI-NEXT: addv s0, v0.4s ; CHECK-GI-NEXT: fmov w9, s0 ; CHECK-GI-NEXT: add w0, w8, w9 -; CHECK-GI-NEXT: ldp d11, d10, [sp], #48 // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-GI-NEXT: ret entry: %az = sext <25 x i8> %a to <25 x i32> @@ -3972,197 +4541,412 @@ define i32 @test_udot_v33i8(ptr nocapture readonly %a, ptr nocapture readonly %b ; ; 
CHECK-GI-LABEL: test_udot_v33i8: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 16 -; CHECK-GI-NEXT: .cfi_offset b8, -16 -; CHECK-GI-NEXT: ldp q21, q25, [x1] +; CHECK-GI-NEXT: sub sp, sp, #112 +; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 112 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w22, -32 +; CHECK-GI-NEXT: .cfi_offset w23, -40 +; CHECK-GI-NEXT: .cfi_offset w24, -48 +; CHECK-GI-NEXT: .cfi_offset w25, -56 +; CHECK-GI-NEXT: .cfi_offset w26, -64 +; CHECK-GI-NEXT: .cfi_offset w27, -72 +; CHECK-GI-NEXT: .cfi_offset w28, -80 +; CHECK-GI-NEXT: .cfi_offset w30, -88 +; CHECK-GI-NEXT: .cfi_offset w29, -96 +; CHECK-GI-NEXT: ldp q7, q16, [x1] ; CHECK-GI-NEXT: fmov s5, wzr -; CHECK-GI-NEXT: ldp q26, q22, [x0] +; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill ; CHECK-GI-NEXT: fmov s6, wzr ; CHECK-GI-NEXT: fmov s0, wzr ; CHECK-GI-NEXT: fmov s1, wzr ; CHECK-GI-NEXT: fmov s3, wzr -; CHECK-GI-NEXT: umov w8, v21.b[0] -; CHECK-GI-NEXT: umov w9, v21.b[4] -; CHECK-GI-NEXT: umov w10, v21.b[1] -; CHECK-GI-NEXT: umov w13, v21.b[8] -; CHECK-GI-NEXT: umov w11, v21.b[5] -; CHECK-GI-NEXT: umov w14, v21.b[9] -; CHECK-GI-NEXT: umov w15, v25.b[0] -; CHECK-GI-NEXT: umov w12, v21.b[2] ; CHECK-GI-NEXT: fmov s2, wzr +; CHECK-GI-NEXT: mov b23, v7.b[7] +; CHECK-GI-NEXT: mov b17, v7.b[1] +; CHECK-GI-NEXT: fmov w11, s7 +; CHECK-GI-NEXT: mov b18, v7.b[2] +; CHECK-GI-NEXT: mov b19, v7.b[3] +; CHECK-GI-NEXT: mov b20, v7.b[4] +; CHECK-GI-NEXT: mov b21, v7.b[5] +; CHECK-GI-NEXT: mov b22, v7.b[6] +; CHECK-GI-NEXT: mov b24, v7.b[8] +; CHECK-GI-NEXT: uxtb w11, w11 +; CHECK-GI-NEXT: mov b25, v7.b[9] +; CHECK-GI-NEXT: mov b26, v7.b[10] +; CHECK-GI-NEXT: mov b27, v7.b[11] +; CHECK-GI-NEXT: mov b28, v7.b[12] +; CHECK-GI-NEXT: mov b29, v7.b[13] +; CHECK-GI-NEXT: mov b30, v7.b[14] +; CHECK-GI-NEXT: mov b7, v7.b[15] +; CHECK-GI-NEXT: fmov w7, s23 +; CHECK-GI-NEXT: mov b23, v16.b[7] +; CHECK-GI-NEXT: fmov w10, s17 +; CHECK-GI-NEXT: fmov w9, s18 +; CHECK-GI-NEXT: fmov w13, s19 +; CHECK-GI-NEXT: fmov w8, s24 +; CHECK-GI-NEXT: mov b17, v16.b[2] +; CHECK-GI-NEXT: fmov w12, s20 +; CHECK-GI-NEXT: fmov w16, s25 +; CHECK-GI-NEXT: fmov w23, s21 +; CHECK-GI-NEXT: uxtb w10, w10 +; CHECK-GI-NEXT: uxtb w9, w9 +; CHECK-GI-NEXT: mov b18, v16.b[1] +; CHECK-GI-NEXT: stp s23, s7, [sp, #4] // 8-byte Folded Spill +; CHECK-GI-NEXT: uxtb w8, w8 +; CHECK-GI-NEXT: fmov s7, w11 +; CHECK-GI-NEXT: fmov w5, s17 +; CHECK-GI-NEXT: fmov w27, s26 +; CHECK-GI-NEXT: mov b21, v16.b[5] +; CHECK-GI-NEXT: fmov s17, w8 +; CHECK-GI-NEXT: uxtb w8, w12 +; CHECK-GI-NEXT: fmov w20, s22 +; CHECK-GI-NEXT: mov v7.h[1], w10 +; CHECK-GI-NEXT: uxtb w10, w16 +; CHECK-GI-NEXT: mov b19, v16.b[3] +; CHECK-GI-NEXT: mov b22, v16.b[4] +; CHECK-GI-NEXT: mov b20, v16.b[6] +; CHECK-GI-NEXT: fmov w21, s27 +; CHECK-GI-NEXT: mov v17.h[1], w10 +; CHECK-GI-NEXT: fmov w24, s28 +; CHECK-GI-NEXT: mov b24, v16.b[8] +; CHECK-GI-NEXT: fmov w22, s29 +; CHECK-GI-NEXT: mov b26, v16.b[9] +; CHECK-GI-NEXT: fmov w4, s30 +; CHECK-GI-NEXT: uxtb 
w10, w21 +; CHECK-GI-NEXT: mov v7.h[2], w9 +; CHECK-GI-NEXT: uxtb w9, w13 +; CHECK-GI-NEXT: str s20, [sp] // 4-byte Folded Spill +; CHECK-GI-NEXT: mov b25, v16.b[10] +; CHECK-GI-NEXT: fmov w25, s18 +; CHECK-GI-NEXT: uxtb w22, w22 +; CHECK-GI-NEXT: mov b27, v16.b[11] +; CHECK-GI-NEXT: mov b28, v16.b[12] +; CHECK-GI-NEXT: mov b29, v16.b[13] +; CHECK-GI-NEXT: mov b30, v16.b[14] +; CHECK-GI-NEXT: fmov w26, s16 +; CHECK-GI-NEXT: mov v7.h[3], w9 +; CHECK-GI-NEXT: uxtb w9, w27 +; CHECK-GI-NEXT: mov b31, v16.b[15] +; CHECK-GI-NEXT: ldp q18, q16, [x0] +; CHECK-GI-NEXT: fmov w2, s21 +; CHECK-GI-NEXT: uxtb w26, w26 +; CHECK-GI-NEXT: mov v17.h[2], w9 +; CHECK-GI-NEXT: fmov w14, s22 +; CHECK-GI-NEXT: fmov w3, s25 +; CHECK-GI-NEXT: fmov w15, s19 +; CHECK-GI-NEXT: fmov w19, s24 +; CHECK-GI-NEXT: mov v7.h[4], w8 +; CHECK-GI-NEXT: uxtb w8, w23 +; CHECK-GI-NEXT: mov b21, v18.b[2] +; CHECK-GI-NEXT: mov b22, v18.b[1] +; CHECK-GI-NEXT: mov b25, v18.b[5] +; CHECK-GI-NEXT: mov b23, v18.b[6] +; CHECK-GI-NEXT: uxtb w19, w19 +; CHECK-GI-NEXT: uxtb w3, w3 +; CHECK-GI-NEXT: mov v17.h[3], w10 +; CHECK-GI-NEXT: uxtb w10, w24 +; CHECK-GI-NEXT: uxtb w24, w7 +; CHECK-GI-NEXT: mov b19, v18.b[3] +; CHECK-GI-NEXT: mov v7.h[5], w8 +; CHECK-GI-NEXT: uxtb w8, w20 +; CHECK-GI-NEXT: fmov w29, s21 +; CHECK-GI-NEXT: mov b21, v18.b[10] +; CHECK-GI-NEXT: fmov w9, s22 +; CHECK-GI-NEXT: fmov w6, s26 +; CHECK-GI-NEXT: mov v17.h[4], w10 +; CHECK-GI-NEXT: uxtb w10, w25 +; CHECK-GI-NEXT: fmov w17, s27 +; CHECK-GI-NEXT: mov b26, v18.b[4] +; CHECK-GI-NEXT: fmov w18, s28 +; CHECK-GI-NEXT: fmov w16, s29 +; CHECK-GI-NEXT: mov v7.h[6], w8 +; CHECK-GI-NEXT: fmov w8, s18 +; CHECK-GI-NEXT: mov b24, v18.b[7] +; CHECK-GI-NEXT: fmov w30, s21 +; CHECK-GI-NEXT: mov b20, v18.b[8] +; CHECK-GI-NEXT: mov b27, v18.b[9] +; CHECK-GI-NEXT: uxtb w16, w16 +; CHECK-GI-NEXT: mov b28, v18.b[11] +; CHECK-GI-NEXT: mov b29, v18.b[12] +; CHECK-GI-NEXT: fmov w23, s25 +; CHECK-GI-NEXT: mov b25, v18.b[13] +; CHECK-GI-NEXT: fmov w21, s23 +; CHECK-GI-NEXT: mov v7.h[7], w24 +; CHECK-GI-NEXT: uxtb w24, w8 +; CHECK-GI-NEXT: uxtb w8, w9 +; CHECK-GI-NEXT: uxtb w9, w29 +; CHECK-GI-NEXT: mov b23, v18.b[14] +; CHECK-GI-NEXT: mov b22, v18.b[15] +; CHECK-GI-NEXT: fmov s21, w24 +; CHECK-GI-NEXT: fmov s18, w26 +; CHECK-GI-NEXT: fmov w28, s19 +; CHECK-GI-NEXT: mov b19, v16.b[1] +; CHECK-GI-NEXT: mov v17.h[5], w22 +; CHECK-GI-NEXT: fmov w7, s20 +; CHECK-GI-NEXT: fmov w11, s27 +; CHECK-GI-NEXT: fmov w27, s26 +; CHECK-GI-NEXT: mov b20, v16.b[2] +; CHECK-GI-NEXT: mov v21.h[1], w8 +; CHECK-GI-NEXT: uxtb w8, w4 +; CHECK-GI-NEXT: mov v18.h[1], w10 +; CHECK-GI-NEXT: uxtb w10, w5 +; CHECK-GI-NEXT: uxtb w7, w7 +; CHECK-GI-NEXT: fmov w24, s23 +; CHECK-GI-NEXT: mov b23, v16.b[6] +; CHECK-GI-NEXT: fmov w4, s22 +; CHECK-GI-NEXT: mov b22, v16.b[8] +; CHECK-GI-NEXT: mov v17.h[6], w8 +; CHECK-GI-NEXT: fmov w8, s19 +; CHECK-GI-NEXT: fmov s19, w19 +; CHECK-GI-NEXT: mov v21.h[2], w9 +; CHECK-GI-NEXT: uxtb w9, w28 +; CHECK-GI-NEXT: mov v18.h[2], w10 +; CHECK-GI-NEXT: uxtb w10, w6 +; CHECK-GI-NEXT: mov b27, v16.b[9] +; CHECK-GI-NEXT: fmov w20, s24 +; CHECK-GI-NEXT: uxtb w8, w8 +; CHECK-GI-NEXT: mov b24, v16.b[3] +; CHECK-GI-NEXT: fmov w5, s20 +; CHECK-GI-NEXT: mov v19.h[1], w10 +; CHECK-GI-NEXT: fmov w10, s23 +; CHECK-GI-NEXT: fmov s23, w7 +; CHECK-GI-NEXT: mov v21.h[3], w9 +; CHECK-GI-NEXT: uxtb w9, w11 +; CHECK-GI-NEXT: uxtb w11, w27 +; CHECK-GI-NEXT: uxtb w27, w30 +; CHECK-GI-NEXT: uxtb w5, w5 +; CHECK-GI-NEXT: fmov w7, s22 +; CHECK-GI-NEXT: uxtb w10, w10 +; CHECK-GI-NEXT: mov v23.h[1], w9 +; 
CHECK-GI-NEXT: fmov w9, s16 +; CHECK-GI-NEXT: mov b20, v16.b[10] +; CHECK-GI-NEXT: fmov w22, s28 +; CHECK-GI-NEXT: fmov w25, s25 +; CHECK-GI-NEXT: uxtb w7, w7 +; CHECK-GI-NEXT: mov v21.h[4], w11 +; CHECK-GI-NEXT: fmov w11, s27 +; CHECK-GI-NEXT: uxtb w9, w9 +; CHECK-GI-NEXT: mov b25, v16.b[5] +; CHECK-GI-NEXT: fmov w29, s24 +; CHECK-GI-NEXT: fmov s22, w7 +; CHECK-GI-NEXT: mov v23.h[2], w27 +; CHECK-GI-NEXT: mov b24, v16.b[11] +; CHECK-GI-NEXT: uxtb w11, w11 +; CHECK-GI-NEXT: fmov w27, s20 +; CHECK-GI-NEXT: fmov s20, w9 +; CHECK-GI-NEXT: fmov w26, s29 +; CHECK-GI-NEXT: mov b26, v16.b[4] +; CHECK-GI-NEXT: mov v19.h[2], w3 +; CHECK-GI-NEXT: uxtb w3, w29 +; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v22.h[1], w11 +; CHECK-GI-NEXT: uxtb w11, w15 +; CHECK-GI-NEXT: uxtb w15, w22 +; CHECK-GI-NEXT: uxtb w22, w23 +; CHECK-GI-NEXT: mov v20.h[1], w8 +; CHECK-GI-NEXT: fmov w6, s25 +; CHECK-GI-NEXT: mov v18.h[3], w11 +; CHECK-GI-NEXT: uxtb w11, w27 +; CHECK-GI-NEXT: mov v23.h[3], w15 +; CHECK-GI-NEXT: uxtb w15, w17 +; CHECK-GI-NEXT: uxtb w17, w21 +; CHECK-GI-NEXT: mov b25, v16.b[12] +; CHECK-GI-NEXT: fmov w28, s24 +; CHECK-GI-NEXT: mov v21.h[5], w22 +; CHECK-GI-NEXT: mov v22.h[2], w11 +; CHECK-GI-NEXT: uxtb w11, w14 +; CHECK-GI-NEXT: uxtb w14, w26 +; CHECK-GI-NEXT: mov v20.h[2], w5 +; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: fmov w19, s26 +; CHECK-GI-NEXT: mov v18.h[4], w11 +; CHECK-GI-NEXT: uxtb w11, w28 +; CHECK-GI-NEXT: mov v23.h[4], w14 +; CHECK-GI-NEXT: uxtb w14, w25 +; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov b26, v16.b[13] +; CHECK-GI-NEXT: fmov w7, s25 +; CHECK-GI-NEXT: mov v19.h[3], w15 +; CHECK-GI-NEXT: uxtb w15, w18 +; CHECK-GI-NEXT: uxtb w18, w19 +; CHECK-GI-NEXT: mov v21.h[6], w17 +; CHECK-GI-NEXT: uxtb w17, w20 +; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v22.h[3], w11 +; CHECK-GI-NEXT: uxtb w11, w2 +; CHECK-GI-NEXT: mov v20.h[3], w3 +; CHECK-GI-NEXT: mov v23.h[5], w14 +; CHECK-GI-NEXT: uxtb w14, w24 +; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v18.h[5], w11 +; CHECK-GI-NEXT: uxtb w11, w7 +; CHECK-GI-NEXT: fmov w8, s26 +; CHECK-GI-NEXT: mov v19.h[4], w15 +; CHECK-GI-NEXT: ldr w15, [sp] // 4-byte Folded Reload +; CHECK-GI-NEXT: mov v21.h[7], w17 +; CHECK-GI-NEXT: uxtb w17, w6 +; CHECK-GI-NEXT: mov v22.h[4], w11 +; CHECK-GI-NEXT: ldr w11, [sp, #8] // 4-byte Folded Reload +; CHECK-GI-NEXT: uxtb w8, w8 +; CHECK-GI-NEXT: uxtb w15, w15 +; CHECK-GI-NEXT: fmov w13, s30 +; CHECK-GI-NEXT: uxtb w11, w11 +; CHECK-GI-NEXT: mov v20.h[4], w18 +; CHECK-GI-NEXT: mov v23.h[6], w14 +; CHECK-GI-NEXT: mov v19.h[5], w16 +; CHECK-GI-NEXT: mov b27, v16.b[14] +; CHECK-GI-NEXT: mul v24.8h, v7.8h, v21.8h +; CHECK-GI-NEXT: mov v22.h[5], w8 +; CHECK-GI-NEXT: uxtb w8, w4 +; CHECK-GI-NEXT: mov b7, v16.b[7] +; CHECK-GI-NEXT: mov b16, v16.b[15] +; CHECK-GI-NEXT: fmov w12, s31 +; CHECK-GI-NEXT: mov v17.h[7], w11 +; CHECK-GI-NEXT: uxtb w11, w13 +; CHECK-GI-NEXT: ldr w13, [sp, #4] // 4-byte Folded Reload +; CHECK-GI-NEXT: mov v20.h[5], w17 +; CHECK-GI-NEXT: mov v23.h[7], w8 +; CHECK-GI-NEXT: fmov w9, s27 +; CHECK-GI-NEXT: mov v18.h[6], w15 +; CHECK-GI-NEXT: uxtb w8, w12 +; CHECK-GI-NEXT: uxtb w13, w13 +; CHECK-GI-NEXT: mov v19.h[6], w11 +; CHECK-GI-NEXT: fmov w12, s16 +; CHECK-GI-NEXT: fmov w11, s7 ; CHECK-GI-NEXT: fmov s4, wzr +; CHECK-GI-NEXT: 
uxtb w9, w9 +; CHECK-GI-NEXT: mov v20.h[6], w10 +; CHECK-GI-NEXT: umov w10, v24.h[0] +; CHECK-GI-NEXT: mul v21.8h, v17.8h, v23.8h +; CHECK-GI-NEXT: mov v18.h[7], w13 ; CHECK-GI-NEXT: mov v5.s[1], wzr +; CHECK-GI-NEXT: uxtb w11, w11 +; CHECK-GI-NEXT: mov v19.h[7], w8 +; CHECK-GI-NEXT: uxtb w8, w12 +; CHECK-GI-NEXT: umov w12, v24.h[4] +; CHECK-GI-NEXT: mov v22.h[6], w9 +; CHECK-GI-NEXT: umov w9, v24.h[1] +; CHECK-GI-NEXT: mov v20.h[7], w11 +; CHECK-GI-NEXT: umov w11, v24.h[5] +; CHECK-GI-NEXT: fmov s7, w10 +; CHECK-GI-NEXT: ldrb w10, [x1, #32] +; CHECK-GI-NEXT: umov w13, v21.h[0] +; CHECK-GI-NEXT: umov w14, v21.h[1] +; CHECK-GI-NEXT: umov w15, v21.h[4] ; CHECK-GI-NEXT: mov v6.s[1], wzr -; CHECK-GI-NEXT: fmov s7, w8 -; CHECK-GI-NEXT: fmov s17, w9 -; CHECK-GI-NEXT: umov w8, v21.b[6] -; CHECK-GI-NEXT: fmov s16, w13 -; CHECK-GI-NEXT: umov w9, v21.b[3] -; CHECK-GI-NEXT: umov w13, v21.b[7] -; CHECK-GI-NEXT: fmov s18, w15 -; CHECK-GI-NEXT: umov w15, v25.b[4] ; CHECK-GI-NEXT: mov v0.s[1], wzr -; CHECK-GI-NEXT: mov v7.s[1], w10 -; CHECK-GI-NEXT: umov w10, v21.b[12] -; CHECK-GI-NEXT: mov v17.s[1], w11 -; CHECK-GI-NEXT: umov w11, v21.b[13] -; CHECK-GI-NEXT: mov v16.s[1], w14 -; CHECK-GI-NEXT: umov w14, v25.b[1] +; CHECK-GI-NEXT: fmov s16, w12 +; CHECK-GI-NEXT: mov v22.h[7], w8 +; CHECK-GI-NEXT: umov w12, v24.h[6] +; CHECK-GI-NEXT: umov w8, v24.h[2] +; CHECK-GI-NEXT: mov v7.s[1], w9 +; CHECK-GI-NEXT: ldrb w9, [x0, #32] +; CHECK-GI-NEXT: fmov s17, w13 +; CHECK-GI-NEXT: mul v23.8h, v18.8h, v20.8h +; CHECK-GI-NEXT: umov w13, v24.h[7] +; CHECK-GI-NEXT: mov v16.s[1], w11 +; CHECK-GI-NEXT: umov w11, v21.h[5] +; CHECK-GI-NEXT: fmov s18, w15 +; CHECK-GI-NEXT: mul v19.8h, v19.8h, v22.8h +; CHECK-GI-NEXT: umov w15, v21.h[6] ; CHECK-GI-NEXT: mov v1.s[1], wzr +; CHECK-GI-NEXT: mov v17.s[1], w14 +; CHECK-GI-NEXT: umov w14, v21.h[2] +; CHECK-GI-NEXT: mov v7.s[2], w8 +; CHECK-GI-NEXT: mul w8, w10, w9 +; CHECK-GI-NEXT: umov w9, v23.h[0] +; CHECK-GI-NEXT: umov w10, v23.h[1] +; CHECK-GI-NEXT: mov v16.s[2], w12 +; CHECK-GI-NEXT: umov w12, v21.h[3] +; CHECK-GI-NEXT: mov v18.s[1], w11 +; CHECK-GI-NEXT: umov w11, v23.h[4] ; CHECK-GI-NEXT: mov v3.s[1], wzr ; CHECK-GI-NEXT: mov v2.s[1], wzr -; CHECK-GI-NEXT: fmov s20, w15 -; CHECK-GI-NEXT: umov w15, v25.b[13] +; CHECK-GI-NEXT: mov v17.s[2], w14 +; CHECK-GI-NEXT: umov w14, v23.h[5] ; CHECK-GI-NEXT: mov v4.s[1], wzr -; CHECK-GI-NEXT: fmov s19, w10 -; CHECK-GI-NEXT: mov v7.s[2], w12 -; CHECK-GI-NEXT: umov w12, v21.b[10] -; CHECK-GI-NEXT: mov v18.s[1], w14 -; CHECK-GI-NEXT: umov w14, v25.b[5] -; CHECK-GI-NEXT: mov v17.s[2], w8 -; CHECK-GI-NEXT: umov w8, v21.b[11] -; CHECK-GI-NEXT: umov w10, v21.b[14] +; CHECK-GI-NEXT: fmov s20, w9 +; CHECK-GI-NEXT: umov w9, v19.h[1] ; CHECK-GI-NEXT: mov v5.s[2], wzr -; CHECK-GI-NEXT: mov v19.s[1], w11 -; CHECK-GI-NEXT: umov w11, v25.b[2] +; CHECK-GI-NEXT: mov v16.s[3], w13 +; CHECK-GI-NEXT: umov w13, v19.h[0] +; CHECK-GI-NEXT: mov v18.s[2], w15 +; CHECK-GI-NEXT: umov w15, v21.h[7] +; CHECK-GI-NEXT: fmov s21, w11 +; CHECK-GI-NEXT: umov w11, v23.h[2] +; CHECK-GI-NEXT: mov v17.s[3], w12 +; CHECK-GI-NEXT: umov w12, v19.h[4] +; CHECK-GI-NEXT: mov v20.s[1], w10 +; CHECK-GI-NEXT: umov w10, v23.h[3] ; CHECK-GI-NEXT: mov v6.s[2], wzr -; CHECK-GI-NEXT: mov v16.s[2], w12 -; CHECK-GI-NEXT: umov w12, v25.b[8] -; CHECK-GI-NEXT: mov v7.s[3], w9 -; CHECK-GI-NEXT: mov v20.s[1], w14 -; CHECK-GI-NEXT: umov w14, v21.b[15] -; CHECK-GI-NEXT: umov w9, v25.b[9] -; CHECK-GI-NEXT: mov v17.s[3], w13 -; CHECK-GI-NEXT: umov w13, v25.b[12] +; CHECK-GI-NEXT: umov w16, v24.h[3] 
+; CHECK-GI-NEXT: fmov s22, w13 +; CHECK-GI-NEXT: umov w13, v19.h[5] +; CHECK-GI-NEXT: mov v21.s[1], w14 +; CHECK-GI-NEXT: umov w14, v23.h[6] +; CHECK-GI-NEXT: mov v18.s[3], w15 +; CHECK-GI-NEXT: umov w15, v19.h[2] +; CHECK-GI-NEXT: mov v20.s[2], w11 +; CHECK-GI-NEXT: umov w11, v19.h[6] ; CHECK-GI-NEXT: mov v0.s[2], wzr -; CHECK-GI-NEXT: mov v18.s[2], w11 -; CHECK-GI-NEXT: umov w11, v26.b[0] -; CHECK-GI-NEXT: mov v19.s[2], w10 -; CHECK-GI-NEXT: fmov s21, w12 -; CHECK-GI-NEXT: umov w12, v26.b[1] -; CHECK-GI-NEXT: mov v16.s[3], w8 -; CHECK-GI-NEXT: umov w8, v26.b[5] -; CHECK-GI-NEXT: umov w10, v25.b[6] +; CHECK-GI-NEXT: mov v22.s[1], w9 +; CHECK-GI-NEXT: umov w9, v23.h[7] +; CHECK-GI-NEXT: fmov s23, w12 +; CHECK-GI-NEXT: umov w12, v19.h[3] ; CHECK-GI-NEXT: mov v1.s[2], wzr -; CHECK-GI-NEXT: fmov s23, w13 -; CHECK-GI-NEXT: umov w13, v25.b[3] ; CHECK-GI-NEXT: mov v3.s[2], wzr -; CHECK-GI-NEXT: fmov s24, w11 -; CHECK-GI-NEXT: mov v21.s[1], w9 -; CHECK-GI-NEXT: umov w9, v25.b[10] -; CHECK-GI-NEXT: umov w11, v26.b[2] -; CHECK-GI-NEXT: mov v19.s[3], w14 -; CHECK-GI-NEXT: umov w14, v26.b[13] -; CHECK-GI-NEXT: mov v23.s[1], w15 -; CHECK-GI-NEXT: umov w15, v25.b[14] -; CHECK-GI-NEXT: mov v20.s[2], w10 -; CHECK-GI-NEXT: mov v24.s[1], w12 -; CHECK-GI-NEXT: umov w12, v26.b[4] -; CHECK-GI-NEXT: umov w10, v25.b[7] -; CHECK-GI-NEXT: mov v21.s[2], w9 -; CHECK-GI-NEXT: umov w9, v25.b[11] -; CHECK-GI-NEXT: mov v18.s[3], w13 -; CHECK-GI-NEXT: umov w13, v26.b[9] +; CHECK-GI-NEXT: mov v21.s[2], w14 ; CHECK-GI-NEXT: mov v2.s[2], wzr ; CHECK-GI-NEXT: mov v4.s[2], wzr -; CHECK-GI-NEXT: mov v23.s[2], w15 -; CHECK-GI-NEXT: umov w15, v25.b[15] +; CHECK-GI-NEXT: mov v23.s[1], w13 ; CHECK-GI-NEXT: mov v5.s[3], wzr -; CHECK-GI-NEXT: fmov s27, w12 -; CHECK-GI-NEXT: mov v24.s[2], w11 -; CHECK-GI-NEXT: umov w11, v26.b[6] -; CHECK-GI-NEXT: umov w12, v26.b[8] -; CHECK-GI-NEXT: mov v21.s[3], w9 -; CHECK-GI-NEXT: umov w9, v26.b[12] -; CHECK-GI-NEXT: mov v20.s[3], w10 -; CHECK-GI-NEXT: umov w10, v26.b[3] ; CHECK-GI-NEXT: mov v6.s[3], wzr -; CHECK-GI-NEXT: mov v27.s[1], w8 -; CHECK-GI-NEXT: mov v23.s[3], w15 -; CHECK-GI-NEXT: umov w15, v22.b[0] -; CHECK-GI-NEXT: umov w8, v26.b[7] +; CHECK-GI-NEXT: mov v22.s[2], w15 +; CHECK-GI-NEXT: mov v7.s[3], w16 +; CHECK-GI-NEXT: mov v20.s[3], w10 ; CHECK-GI-NEXT: mov v0.s[3], wzr ; CHECK-GI-NEXT: mov v1.s[3], wzr -; CHECK-GI-NEXT: fmov s25, w12 -; CHECK-GI-NEXT: fmov s29, w9 -; CHECK-GI-NEXT: umov w9, v22.b[5] -; CHECK-GI-NEXT: mov v24.s[3], w10 -; CHECK-GI-NEXT: umov w10, v22.b[1] -; CHECK-GI-NEXT: umov w12, v26.b[10] -; CHECK-GI-NEXT: mov v27.s[2], w11 -; CHECK-GI-NEXT: umov w11, v22.b[4] -; CHECK-GI-NEXT: fmov s28, w15 -; CHECK-GI-NEXT: mov v25.s[1], w13 -; CHECK-GI-NEXT: umov w13, v26.b[14] -; CHECK-GI-NEXT: mov v29.s[1], w14 -; CHECK-GI-NEXT: umov w15, v22.b[12] -; CHECK-GI-NEXT: umov w14, v22.b[2] ; CHECK-GI-NEXT: mov v3.s[3], wzr -; CHECK-GI-NEXT: mov v28.s[1], w10 -; CHECK-GI-NEXT: umov w10, v22.b[13] +; CHECK-GI-NEXT: mov v21.s[3], w9 +; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload ; CHECK-GI-NEXT: mov v2.s[3], wzr -; CHECK-GI-NEXT: fmov s30, w11 -; CHECK-GI-NEXT: umov w11, v22.b[6] -; CHECK-GI-NEXT: mov v27.s[3], w8 -; CHECK-GI-NEXT: mov v25.s[2], w12 -; CHECK-GI-NEXT: mov v29.s[2], w13 -; CHECK-GI-NEXT: umov w13, v26.b[11] -; CHECK-GI-NEXT: fmov s31, w15 -; CHECK-GI-NEXT: umov w15, v26.b[15] -; CHECK-GI-NEXT: umov w12, v22.b[9] -; CHECK-GI-NEXT: mov v30.s[1], w9 -; CHECK-GI-NEXT: umov w9, v22.b[8] -; CHECK-GI-NEXT: mov v28.s[2], w14 -; CHECK-GI-NEXT: ldrb w14, [x1, 
#32] -; CHECK-GI-NEXT: umov w8, v22.b[15] -; CHECK-GI-NEXT: mul v17.4s, v17.4s, v27.4s -; CHECK-GI-NEXT: mov v31.s[1], w10 -; CHECK-GI-NEXT: umov w10, v22.b[14] -; CHECK-GI-NEXT: mov v25.s[3], w13 -; CHECK-GI-NEXT: ldrb w13, [x0, #32] -; CHECK-GI-NEXT: mov v29.s[3], w15 +; CHECK-GI-NEXT: mov v23.s[2], w11 +; CHECK-GI-NEXT: umov w11, v19.h[7] +; CHECK-GI-NEXT: fmov s19, w8 +; CHECK-GI-NEXT: mov v22.s[3], w12 ; CHECK-GI-NEXT: mov v4.s[3], wzr -; CHECK-GI-NEXT: mov v30.s[2], w11 -; CHECK-GI-NEXT: fmov s26, w9 -; CHECK-GI-NEXT: umov w9, v22.b[7] -; CHECK-GI-NEXT: umov w11, v22.b[3] ; CHECK-GI-NEXT: add v5.4s, v5.4s, v6.4s -; CHECK-GI-NEXT: mla v17.4s, v7.4s, v24.4s -; CHECK-GI-NEXT: mov v31.s[2], w10 +; CHECK-GI-NEXT: add v6.4s, v7.4s, v16.4s +; CHECK-GI-NEXT: add v7.4s, v17.4s, v18.4s ; CHECK-GI-NEXT: add v1.4s, v1.4s, v3.4s -; CHECK-GI-NEXT: mov v26.s[1], w12 -; CHECK-GI-NEXT: umov w12, v22.b[10] -; CHECK-GI-NEXT: mul v19.4s, v19.4s, v29.4s -; CHECK-GI-NEXT: mov v30.s[3], w9 -; CHECK-GI-NEXT: mul w9, w14, w13 -; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s -; CHECK-GI-NEXT: mov v28.s[3], w11 +; CHECK-GI-NEXT: mov v19.s[1], wzr +; CHECK-GI-NEXT: add v16.4s, v20.4s, v21.4s +; CHECK-GI-NEXT: mov v23.s[3], w11 ; CHECK-GI-NEXT: add v0.4s, v0.4s, v5.4s -; CHECK-GI-NEXT: mov v31.s[3], w8 -; CHECK-GI-NEXT: umov w8, v22.b[11] -; CHECK-GI-NEXT: fmov s8, w9 -; CHECK-GI-NEXT: mov v26.s[2], w12 -; CHECK-GI-NEXT: mla v19.4s, v16.4s, v25.4s -; CHECK-GI-NEXT: mul v20.4s, v20.4s, v30.4s +; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s +; CHECK-GI-NEXT: add v3.4s, v6.4s, v7.4s +; CHECK-GI-NEXT: mov v19.s[2], wzr +; CHECK-GI-NEXT: add v17.4s, v22.4s, v23.4s ; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s -; CHECK-GI-NEXT: mov v8.s[1], wzr -; CHECK-GI-NEXT: mul v22.4s, v23.4s, v31.4s -; CHECK-GI-NEXT: mov v26.s[3], w8 -; CHECK-GI-NEXT: add v3.4s, v17.4s, v19.4s -; CHECK-GI-NEXT: mla v20.4s, v18.4s, v28.4s -; CHECK-GI-NEXT: mov v8.s[2], wzr -; CHECK-GI-NEXT: mla v22.4s, v21.4s, v26.4s -; CHECK-GI-NEXT: mov v8.s[3], wzr -; CHECK-GI-NEXT: add v4.4s, v20.4s, v22.4s -; CHECK-GI-NEXT: add v0.4s, v8.4s, v0.4s +; CHECK-GI-NEXT: mov v19.s[3], wzr +; CHECK-GI-NEXT: add v4.4s, v16.4s, v17.4s ; CHECK-GI-NEXT: add v2.4s, v3.4s, v4.4s +; CHECK-GI-NEXT: add v0.4s, v19.4s, v0.4s ; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s ; CHECK-GI-NEXT: add v0.4s, v2.4s, v0.4s ; CHECK-GI-NEXT: addv s0, v0.4s ; CHECK-GI-NEXT: fmov w8, s0 -; CHECK-GI-NEXT: add w0, w8, w2 -; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload +; CHECK-GI-NEXT: add w0, w8, w9 +; CHECK-GI-NEXT: add sp, sp, #112 ; CHECK-GI-NEXT: ret entry: %0 = load <33 x i8>, ptr %a @@ -4359,197 +5143,412 @@ define i32 @test_sdot_v33i8(ptr nocapture readonly %a, ptr nocapture readonly %b ; ; CHECK-GI-LABEL: test_sdot_v33i8: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: str d8, [sp, #-16]! 
// 8-byte Folded Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 16 -; CHECK-GI-NEXT: .cfi_offset b8, -16 -; CHECK-GI-NEXT: ldp q21, q25, [x1] -; CHECK-GI-NEXT: fmov s5, wzr -; CHECK-GI-NEXT: ldp q26, q22, [x0] -; CHECK-GI-NEXT: fmov s6, wzr -; CHECK-GI-NEXT: fmov s0, wzr +; CHECK-GI-NEXT: sub sp, sp, #112 +; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 112 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w22, -32 +; CHECK-GI-NEXT: .cfi_offset w23, -40 +; CHECK-GI-NEXT: .cfi_offset w24, -48 +; CHECK-GI-NEXT: .cfi_offset w25, -56 +; CHECK-GI-NEXT: .cfi_offset w26, -64 +; CHECK-GI-NEXT: .cfi_offset w27, -72 +; CHECK-GI-NEXT: .cfi_offset w28, -80 +; CHECK-GI-NEXT: .cfi_offset w30, -88 +; CHECK-GI-NEXT: .cfi_offset w29, -96 +; CHECK-GI-NEXT: ldp q7, q16, [x1] ; CHECK-GI-NEXT: fmov s1, wzr +; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill ; CHECK-GI-NEXT: fmov s3, wzr -; CHECK-GI-NEXT: smov w8, v21.b[0] -; CHECK-GI-NEXT: smov w9, v21.b[4] -; CHECK-GI-NEXT: smov w10, v21.b[1] -; CHECK-GI-NEXT: smov w13, v21.b[8] -; CHECK-GI-NEXT: smov w11, v21.b[5] -; CHECK-GI-NEXT: smov w14, v21.b[9] -; CHECK-GI-NEXT: smov w15, v25.b[0] -; CHECK-GI-NEXT: smov w12, v21.b[2] ; CHECK-GI-NEXT: fmov s2, wzr +; CHECK-GI-NEXT: fmov s5, wzr ; CHECK-GI-NEXT: fmov s4, wzr -; CHECK-GI-NEXT: mov v5.s[1], wzr -; CHECK-GI-NEXT: mov v6.s[1], wzr -; CHECK-GI-NEXT: fmov s7, w8 -; CHECK-GI-NEXT: fmov s17, w9 -; CHECK-GI-NEXT: smov w8, v21.b[6] -; CHECK-GI-NEXT: fmov s16, w13 -; CHECK-GI-NEXT: smov w9, v21.b[3] -; CHECK-GI-NEXT: smov w13, v21.b[7] -; CHECK-GI-NEXT: fmov s18, w15 -; CHECK-GI-NEXT: smov w15, v25.b[4] -; CHECK-GI-NEXT: mov v0.s[1], wzr -; CHECK-GI-NEXT: mov v7.s[1], w10 -; CHECK-GI-NEXT: smov w10, v21.b[12] -; CHECK-GI-NEXT: mov v17.s[1], w11 -; CHECK-GI-NEXT: smov w11, v21.b[13] -; CHECK-GI-NEXT: mov v16.s[1], w14 -; CHECK-GI-NEXT: smov w14, v25.b[1] +; CHECK-GI-NEXT: fmov s6, wzr +; CHECK-GI-NEXT: mov b19, v7.b[3] +; CHECK-GI-NEXT: mov b23, v7.b[7] +; CHECK-GI-NEXT: mov b17, v7.b[1] +; CHECK-GI-NEXT: fmov w11, s7 +; CHECK-GI-NEXT: mov b18, v7.b[2] +; CHECK-GI-NEXT: mov b20, v7.b[4] +; CHECK-GI-NEXT: mov b21, v7.b[5] +; CHECK-GI-NEXT: mov b22, v7.b[6] +; CHECK-GI-NEXT: mov b24, v7.b[8] +; CHECK-GI-NEXT: mov b25, v7.b[9] +; CHECK-GI-NEXT: mov b26, v7.b[10] +; CHECK-GI-NEXT: mov b27, v7.b[11] +; CHECK-GI-NEXT: sxtb w11, w11 +; CHECK-GI-NEXT: mov b28, v7.b[12] +; CHECK-GI-NEXT: fmov w14, s19 +; CHECK-GI-NEXT: mov b19, v7.b[13] +; CHECK-GI-NEXT: mov b29, v7.b[14] +; CHECK-GI-NEXT: mov b7, v7.b[15] +; CHECK-GI-NEXT: fmov w7, s23 +; CHECK-GI-NEXT: mov b23, v16.b[6] +; CHECK-GI-NEXT: fmov w10, s17 +; CHECK-GI-NEXT: fmov w9, s18 +; CHECK-GI-NEXT: fmov w8, s24 +; CHECK-GI-NEXT: mov b30, v16.b[1] +; CHECK-GI-NEXT: fmov w16, s25 +; CHECK-GI-NEXT: fmov w12, s20 +; CHECK-GI-NEXT: fmov w24, s21 +; CHECK-GI-NEXT: sxtb w10, w10 +; CHECK-GI-NEXT: sxtb w7, w7 +; CHECK-GI-NEXT: fmov w22, s22 +; CHECK-GI-NEXT: stp s23, s7, [sp, #4] // 8-byte Folded Spill +; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: fmov s7, w11 +; 
CHECK-GI-NEXT: mov b20, v16.b[2] +; CHECK-GI-NEXT: mov b17, v16.b[3] +; CHECK-GI-NEXT: mov b21, v16.b[4] +; CHECK-GI-NEXT: mov b18, v16.b[5] +; CHECK-GI-NEXT: fmov w27, s26 +; CHECK-GI-NEXT: fmov w25, s27 +; CHECK-GI-NEXT: mov b22, v16.b[7] +; CHECK-GI-NEXT: fmov w26, s28 +; CHECK-GI-NEXT: mov v7.h[1], w10 +; CHECK-GI-NEXT: sxtb w10, w16 +; CHECK-GI-NEXT: mov b25, v16.b[8] +; CHECK-GI-NEXT: fmov w23, s19 +; CHECK-GI-NEXT: mov b24, v16.b[9] +; CHECK-GI-NEXT: fmov w5, s29 +; CHECK-GI-NEXT: mov b26, v16.b[10] +; CHECK-GI-NEXT: mov b19, v16.b[11] +; CHECK-GI-NEXT: fmov w6, s30 +; CHECK-GI-NEXT: mov b27, v16.b[12] +; CHECK-GI-NEXT: mov b28, v16.b[13] +; CHECK-GI-NEXT: mov b29, v16.b[14] +; CHECK-GI-NEXT: sxtb w30, w23 +; CHECK-GI-NEXT: sxtb w5, w5 +; CHECK-GI-NEXT: mov v7.h[2], w9 +; CHECK-GI-NEXT: sxtb w9, w14 +; CHECK-GI-NEXT: fmov w20, s16 +; CHECK-GI-NEXT: mov b30, v16.b[15] +; CHECK-GI-NEXT: fmov s16, w8 +; CHECK-GI-NEXT: sxtb w8, w12 +; CHECK-GI-NEXT: fmov w15, s17 +; CHECK-GI-NEXT: fmov w11, s18 +; CHECK-GI-NEXT: ldp q18, q17, [x0] +; CHECK-GI-NEXT: mov v7.h[3], w9 +; CHECK-GI-NEXT: sxtb w9, w27 +; CHECK-GI-NEXT: fmov w18, s20 +; CHECK-GI-NEXT: sxtb w15, w15 +; CHECK-GI-NEXT: mov v16.h[1], w10 +; CHECK-GI-NEXT: sxtb w10, w25 +; CHECK-GI-NEXT: mov b20, v18.b[3] +; CHECK-GI-NEXT: fmov w2, s22 +; CHECK-GI-NEXT: mov b22, v18.b[1] +; CHECK-GI-NEXT: sxtb w18, w18 +; CHECK-GI-NEXT: fmov w13, s21 +; CHECK-GI-NEXT: mov b21, v18.b[2] +; CHECK-GI-NEXT: mov v7.h[4], w8 +; CHECK-GI-NEXT: fmov w3, s19 +; CHECK-GI-NEXT: mov b19, v18.b[6] +; CHECK-GI-NEXT: mov v16.h[2], w9 +; CHECK-GI-NEXT: sxtb w9, w24 +; CHECK-GI-NEXT: fmov w21, s25 +; CHECK-GI-NEXT: sxtb w13, w13 +; CHECK-GI-NEXT: fmov w28, s20 +; CHECK-GI-NEXT: mov b20, v18.b[11] +; CHECK-GI-NEXT: fmov w8, s22 +; CHECK-GI-NEXT: mov b25, v18.b[8] +; CHECK-GI-NEXT: fmov w29, s21 +; CHECK-GI-NEXT: mov v7.h[5], w9 +; CHECK-GI-NEXT: sxtb w9, w22 +; CHECK-GI-NEXT: fmov w19, s24 +; CHECK-GI-NEXT: mov v16.h[3], w10 +; CHECK-GI-NEXT: sxtb w10, w26 +; CHECK-GI-NEXT: fmov w26, s18 +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: sxtb w29, w29 +; CHECK-GI-NEXT: mov b24, v18.b[4] +; CHECK-GI-NEXT: mov b23, v18.b[5] +; CHECK-GI-NEXT: fmov w17, s27 +; CHECK-GI-NEXT: mov b27, v18.b[9] +; CHECK-GI-NEXT: sxtb w23, w26 +; CHECK-GI-NEXT: mov v7.h[6], w9 +; CHECK-GI-NEXT: fmov w24, s19 +; CHECK-GI-NEXT: mov v16.h[4], w10 +; CHECK-GI-NEXT: mov b19, v18.b[14] +; CHECK-GI-NEXT: fmov w10, s25 +; CHECK-GI-NEXT: fmov w4, s26 +; CHECK-GI-NEXT: fmov w16, s28 +; CHECK-GI-NEXT: mov b26, v18.b[7] +; CHECK-GI-NEXT: mov b28, v18.b[10] +; CHECK-GI-NEXT: fmov w27, s24 +; CHECK-GI-NEXT: mov b24, v18.b[12] +; CHECK-GI-NEXT: sxtb w10, w10 +; CHECK-GI-NEXT: mov v7.h[7], w7 +; CHECK-GI-NEXT: fmov w7, s20 +; CHECK-GI-NEXT: sxtb w4, w4 +; CHECK-GI-NEXT: fmov s20, w23 +; CHECK-GI-NEXT: fmov w25, s23 +; CHECK-GI-NEXT: mov b23, v18.b[13] +; CHECK-GI-NEXT: mov b22, v18.b[15] +; CHECK-GI-NEXT: mov v16.h[5], w30 +; CHECK-GI-NEXT: sxtb w7, w7 +; CHECK-GI-NEXT: fmov w9, s27 +; CHECK-GI-NEXT: mov b21, v17.b[1] +; CHECK-GI-NEXT: mov v20.h[1], w8 +; CHECK-GI-NEXT: sxtb w8, w20 +; CHECK-GI-NEXT: sxtb w20, w6 +; CHECK-GI-NEXT: fmov w6, s19 +; CHECK-GI-NEXT: fmov w26, s28 +; CHECK-GI-NEXT: mov b28, v17.b[8] +; CHECK-GI-NEXT: fmov s18, w8 +; CHECK-GI-NEXT: sxtb w8, w21 +; CHECK-GI-NEXT: mov v16.h[6], w5 +; CHECK-GI-NEXT: fmov w5, s22 +; CHECK-GI-NEXT: fmov s22, w10 +; CHECK-GI-NEXT: sxtb w10, w27 +; CHECK-GI-NEXT: sxtb w26, w26 +; CHECK-GI-NEXT: mov v20.h[2], w29 +; CHECK-GI-NEXT: fmov s19, w8 +; 
CHECK-GI-NEXT: sxtb w8, w28 +; CHECK-GI-NEXT: sxtb w28, w19 +; CHECK-GI-NEXT: sxtb w19, w9 +; CHECK-GI-NEXT: fmov w27, s17 +; CHECK-GI-NEXT: mov b25, v17.b[2] +; CHECK-GI-NEXT: fmov w29, s21 +; CHECK-GI-NEXT: mov b21, v17.b[9] +; CHECK-GI-NEXT: mov v22.h[1], w19 +; CHECK-GI-NEXT: fmov w23, s23 +; CHECK-GI-NEXT: mov v20.h[3], w8 +; CHECK-GI-NEXT: mov b23, v17.b[6] +; CHECK-GI-NEXT: fmov w30, s24 +; CHECK-GI-NEXT: sxtb w27, w27 +; CHECK-GI-NEXT: mov b24, v17.b[5] +; CHECK-GI-NEXT: mov v18.h[1], w20 +; CHECK-GI-NEXT: fmov w21, s25 +; CHECK-GI-NEXT: mov b25, v17.b[10] +; CHECK-GI-NEXT: mov v19.h[1], w28 +; CHECK-GI-NEXT: sxtb w28, w29 +; CHECK-GI-NEXT: mov v22.h[2], w26 +; CHECK-GI-NEXT: fmov w26, s21 +; CHECK-GI-NEXT: mov v20.h[4], w10 +; CHECK-GI-NEXT: fmov w10, s28 +; CHECK-GI-NEXT: fmov s21, w27 +; CHECK-GI-NEXT: sxtb w21, w21 +; CHECK-GI-NEXT: mov b27, v17.b[3] +; CHECK-GI-NEXT: fmov w19, s23 +; CHECK-GI-NEXT: sxtb w26, w26 +; CHECK-GI-NEXT: fmov w22, s26 +; CHECK-GI-NEXT: mov b26, v17.b[4] +; CHECK-GI-NEXT: sxtb w10, w10 +; CHECK-GI-NEXT: mov v21.h[1], w28 +; CHECK-GI-NEXT: fmov w8, s24 +; CHECK-GI-NEXT: mov b24, v17.b[11] +; CHECK-GI-NEXT: fmov w27, s25 +; CHECK-GI-NEXT: mov v18.h[2], w18 +; CHECK-GI-NEXT: sxtb w18, w25 +; CHECK-GI-NEXT: fmov s23, w10 +; CHECK-GI-NEXT: fmov w20, s27 +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: fmov w9, s26 +; CHECK-GI-NEXT: mov b26, v17.b[12] +; CHECK-GI-NEXT: sxtb w25, w27 +; CHECK-GI-NEXT: mov v20.h[5], w18 +; CHECK-GI-NEXT: sxtb w18, w3 +; CHECK-GI-NEXT: sxtb w3, w24 +; CHECK-GI-NEXT: mov v23.h[1], w26 +; CHECK-GI-NEXT: mov v21.h[2], w21 +; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: fmov w28, s24 +; CHECK-GI-NEXT: mov v22.h[3], w7 +; CHECK-GI-NEXT: sxtb w7, w20 +; CHECK-GI-NEXT: mov v19.h[2], w4 +; CHECK-GI-NEXT: sxtb w4, w30 +; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v18.h[3], w15 +; CHECK-GI-NEXT: sxtb w20, w28 +; CHECK-GI-NEXT: sxtb w15, w17 +; CHECK-GI-NEXT: sxtb w17, w22 +; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v23.h[2], w25 +; CHECK-GI-NEXT: mov v20.h[6], w3 +; CHECK-GI-NEXT: mov v21.h[3], w7 +; CHECK-GI-NEXT: fmov w10, s26 +; CHECK-GI-NEXT: mov v22.h[4], w4 +; CHECK-GI-NEXT: mov v19.h[3], w18 +; CHECK-GI-NEXT: sxtb w18, w23 +; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov b27, v17.b[13] +; CHECK-GI-NEXT: sxtb w10, w10 +; CHECK-GI-NEXT: mov v23.h[3], w20 +; CHECK-GI-NEXT: mov v18.h[4], w13 +; CHECK-GI-NEXT: sxtb w13, w6 +; CHECK-GI-NEXT: mov v20.h[7], w17 +; CHECK-GI-NEXT: mov v21.h[4], w9 +; CHECK-GI-NEXT: ldr w9, [sp, #8] // 4-byte Folded Reload +; CHECK-GI-NEXT: mov v22.h[5], w18 +; CHECK-GI-NEXT: mov b25, v17.b[14] +; CHECK-GI-NEXT: fmov w26, s27 +; CHECK-GI-NEXT: mov v19.h[4], w15 +; CHECK-GI-NEXT: fmov w14, s29 +; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: mov v23.h[4], w10 +; CHECK-GI-NEXT: sxtb w10, w11 +; CHECK-GI-NEXT: sxtb w11, w16 +; CHECK-GI-NEXT: mov v21.h[5], w8 +; CHECK-GI-NEXT: ldr w8, [sp, #4] // 4-byte Folded Reload +; CHECK-GI-NEXT: sxtb w15, w26 +; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: mov v18.h[5], w10 +; CHECK-GI-NEXT: sxtb w10, w19 +; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload +; CHECK-GI-NEXT: mul v20.8h, v7.8h, v20.8h +; CHECK-GI-NEXT: mov b7, v17.b[7] +; CHECK-GI-NEXT: mov v22.h[6], w13 +; CHECK-GI-NEXT: sxtb w13, w5 +; CHECK-GI-NEXT: fmov w27, s25 +; CHECK-GI-NEXT: mov v19.h[5], w11 +; 
CHECK-GI-NEXT: sxtb w11, w2 +; CHECK-GI-NEXT: mov b17, v17.b[15] +; CHECK-GI-NEXT: mov v18.h[6], w8 +; CHECK-GI-NEXT: mov v16.h[7], w9 +; CHECK-GI-NEXT: sxtb w9, w14 +; CHECK-GI-NEXT: mov v23.h[5], w15 +; CHECK-GI-NEXT: mov v21.h[6], w10 +; CHECK-GI-NEXT: sxtb w14, w27 +; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: fmov w8, s7 +; CHECK-GI-NEXT: mov v22.h[7], w13 +; CHECK-GI-NEXT: fmov w12, s30 +; CHECK-GI-NEXT: mov v19.h[6], w9 +; CHECK-GI-NEXT: fmov w9, s17 +; CHECK-GI-NEXT: smov w10, v20.h[0] +; CHECK-GI-NEXT: mov v23.h[6], w14 +; CHECK-GI-NEXT: mov v18.h[7], w11 +; CHECK-GI-NEXT: smov w13, v20.h[1] +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: sxtb w12, w12 +; CHECK-GI-NEXT: smov w11, v20.h[4] +; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: mul v22.8h, v16.8h, v22.8h +; CHECK-GI-NEXT: smov w14, v20.h[3] +; CHECK-GI-NEXT: mov v21.h[7], w8 +; CHECK-GI-NEXT: ldrsb w8, [x0, #32] +; CHECK-GI-NEXT: mov v19.h[7], w12 +; CHECK-GI-NEXT: mov v23.h[7], w9 +; CHECK-GI-NEXT: ldrsb w9, [x1, #32] +; CHECK-GI-NEXT: fmov s7, w10 +; CHECK-GI-NEXT: smov w10, v20.h[2] +; CHECK-GI-NEXT: smov w12, v20.h[5] +; CHECK-GI-NEXT: fmov s16, w11 +; CHECK-GI-NEXT: mul w9, w9, w8 +; CHECK-GI-NEXT: smov w15, v22.h[4] +; CHECK-GI-NEXT: smov w17, v22.h[5] +; CHECK-GI-NEXT: mul v24.8h, v18.8h, v21.8h +; CHECK-GI-NEXT: mov v7.s[1], w13 +; CHECK-GI-NEXT: smov w13, v22.h[0] +; CHECK-GI-NEXT: mul v18.8h, v19.8h, v23.8h +; CHECK-GI-NEXT: smov w16, v22.h[1] +; CHECK-GI-NEXT: smov w8, v20.h[7] +; CHECK-GI-NEXT: sxth w9, w9 +; CHECK-GI-NEXT: mov v16.s[1], w12 +; CHECK-GI-NEXT: fmov s0, wzr +; CHECK-GI-NEXT: fmov s19, w15 +; CHECK-GI-NEXT: smov w15, v22.h[6] ; CHECK-GI-NEXT: mov v1.s[1], wzr +; CHECK-GI-NEXT: smov w11, v24.h[0] +; CHECK-GI-NEXT: mov v7.s[2], w10 +; CHECK-GI-NEXT: smov w10, v20.h[6] +; CHECK-GI-NEXT: smov w12, v24.h[1] +; CHECK-GI-NEXT: smov w0, v18.h[4] +; CHECK-GI-NEXT: fmov s17, w13 +; CHECK-GI-NEXT: mov v19.s[1], w17 +; CHECK-GI-NEXT: smov w17, v18.h[0] +; CHECK-GI-NEXT: smov w18, v18.h[1] +; CHECK-GI-NEXT: smov w13, v22.h[2] ; CHECK-GI-NEXT: mov v3.s[1], wzr ; CHECK-GI-NEXT: mov v2.s[1], wzr -; CHECK-GI-NEXT: fmov s20, w15 -; CHECK-GI-NEXT: smov w15, v25.b[13] +; CHECK-GI-NEXT: fmov s20, w11 +; CHECK-GI-NEXT: smov w11, v24.h[4] +; CHECK-GI-NEXT: mov v7.s[3], w14 +; CHECK-GI-NEXT: smov w14, v24.h[5] +; CHECK-GI-NEXT: mov v17.s[1], w16 +; CHECK-GI-NEXT: smov w16, v24.h[2] +; CHECK-GI-NEXT: mov v19.s[2], w15 +; CHECK-GI-NEXT: smov w15, v18.h[5] +; CHECK-GI-NEXT: fmov s23, w0 +; CHECK-GI-NEXT: mov v20.s[1], w12 +; CHECK-GI-NEXT: mov v16.s[2], w10 +; CHECK-GI-NEXT: smov w10, v22.h[3] +; CHECK-GI-NEXT: fmov s21, w11 +; CHECK-GI-NEXT: smov w11, v22.h[7] +; CHECK-GI-NEXT: fmov s22, w17 +; CHECK-GI-NEXT: mov v5.s[1], wzr ; CHECK-GI-NEXT: mov v4.s[1], wzr -; CHECK-GI-NEXT: fmov s19, w10 -; CHECK-GI-NEXT: mov v7.s[2], w12 -; CHECK-GI-NEXT: smov w12, v21.b[10] -; CHECK-GI-NEXT: mov v18.s[1], w14 -; CHECK-GI-NEXT: smov w14, v25.b[5] -; CHECK-GI-NEXT: mov v17.s[2], w8 -; CHECK-GI-NEXT: smov w8, v21.b[11] -; CHECK-GI-NEXT: smov w10, v21.b[14] -; CHECK-GI-NEXT: mov v5.s[2], wzr -; CHECK-GI-NEXT: mov v19.s[1], w11 -; CHECK-GI-NEXT: smov w11, v25.b[2] -; CHECK-GI-NEXT: mov v6.s[2], wzr -; CHECK-GI-NEXT: mov v16.s[2], w12 -; CHECK-GI-NEXT: smov w12, v25.b[8] -; CHECK-GI-NEXT: mov v7.s[3], w9 -; CHECK-GI-NEXT: mov v20.s[1], w14 -; CHECK-GI-NEXT: smov w14, v21.b[15] -; CHECK-GI-NEXT: smov w9, v25.b[9] -; CHECK-GI-NEXT: mov v17.s[3], w13 -; CHECK-GI-NEXT: smov w13, v25.b[12] -; CHECK-GI-NEXT: 
mov v0.s[2], wzr -; CHECK-GI-NEXT: mov v18.s[2], w11 -; CHECK-GI-NEXT: smov w11, v26.b[0] -; CHECK-GI-NEXT: mov v19.s[2], w10 -; CHECK-GI-NEXT: fmov s21, w12 -; CHECK-GI-NEXT: smov w12, v26.b[1] -; CHECK-GI-NEXT: mov v16.s[3], w8 -; CHECK-GI-NEXT: smov w8, v26.b[5] -; CHECK-GI-NEXT: smov w10, v25.b[6] +; CHECK-GI-NEXT: mov v6.s[1], wzr +; CHECK-GI-NEXT: mov v23.s[1], w15 +; CHECK-GI-NEXT: smov w15, v18.h[6] +; CHECK-GI-NEXT: mov v0.s[1], wzr +; CHECK-GI-NEXT: mov v21.s[1], w14 +; CHECK-GI-NEXT: smov w14, v24.h[6] +; CHECK-GI-NEXT: mov v20.s[2], w16 +; CHECK-GI-NEXT: mov v22.s[1], w18 +; CHECK-GI-NEXT: smov w16, v18.h[2] ; CHECK-GI-NEXT: mov v1.s[2], wzr -; CHECK-GI-NEXT: fmov s23, w13 -; CHECK-GI-NEXT: smov w13, v25.b[3] ; CHECK-GI-NEXT: mov v3.s[2], wzr -; CHECK-GI-NEXT: fmov s24, w11 -; CHECK-GI-NEXT: mov v21.s[1], w9 -; CHECK-GI-NEXT: smov w9, v25.b[10] -; CHECK-GI-NEXT: smov w11, v26.b[2] -; CHECK-GI-NEXT: mov v19.s[3], w14 -; CHECK-GI-NEXT: smov w14, v26.b[13] -; CHECK-GI-NEXT: mov v23.s[1], w15 -; CHECK-GI-NEXT: smov w15, v25.b[14] -; CHECK-GI-NEXT: mov v20.s[2], w10 -; CHECK-GI-NEXT: mov v24.s[1], w12 -; CHECK-GI-NEXT: smov w12, v26.b[4] -; CHECK-GI-NEXT: smov w10, v25.b[7] -; CHECK-GI-NEXT: mov v21.s[2], w9 -; CHECK-GI-NEXT: smov w9, v25.b[11] -; CHECK-GI-NEXT: mov v18.s[3], w13 -; CHECK-GI-NEXT: smov w13, v26.b[9] ; CHECK-GI-NEXT: mov v2.s[2], wzr +; CHECK-GI-NEXT: mov v5.s[2], wzr ; CHECK-GI-NEXT: mov v4.s[2], wzr +; CHECK-GI-NEXT: mov v6.s[2], wzr ; CHECK-GI-NEXT: mov v23.s[2], w15 -; CHECK-GI-NEXT: smov w15, v25.b[15] -; CHECK-GI-NEXT: mov v5.s[3], wzr -; CHECK-GI-NEXT: fmov s27, w12 -; CHECK-GI-NEXT: mov v24.s[2], w11 -; CHECK-GI-NEXT: smov w11, v26.b[6] -; CHECK-GI-NEXT: smov w12, v26.b[8] -; CHECK-GI-NEXT: mov v21.s[3], w9 -; CHECK-GI-NEXT: smov w9, v26.b[12] -; CHECK-GI-NEXT: mov v20.s[3], w10 -; CHECK-GI-NEXT: smov w10, v26.b[3] -; CHECK-GI-NEXT: mov v6.s[3], wzr -; CHECK-GI-NEXT: mov v27.s[1], w8 -; CHECK-GI-NEXT: mov v23.s[3], w15 -; CHECK-GI-NEXT: smov w15, v22.b[0] -; CHECK-GI-NEXT: smov w8, v26.b[7] -; CHECK-GI-NEXT: mov v0.s[3], wzr +; CHECK-GI-NEXT: mov v21.s[2], w14 +; CHECK-GI-NEXT: smov w14, v18.h[3] +; CHECK-GI-NEXT: smov w15, v18.h[7] +; CHECK-GI-NEXT: fmov s18, w9 +; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload +; CHECK-GI-NEXT: mov v17.s[2], w13 +; CHECK-GI-NEXT: smov w12, v24.h[3] +; CHECK-GI-NEXT: smov w13, v24.h[7] +; CHECK-GI-NEXT: mov v22.s[2], w16 +; CHECK-GI-NEXT: mov v0.s[2], wzr ; CHECK-GI-NEXT: mov v1.s[3], wzr -; CHECK-GI-NEXT: fmov s25, w12 -; CHECK-GI-NEXT: fmov s29, w9 -; CHECK-GI-NEXT: smov w9, v22.b[5] -; CHECK-GI-NEXT: mov v24.s[3], w10 -; CHECK-GI-NEXT: smov w10, v22.b[1] -; CHECK-GI-NEXT: smov w12, v26.b[10] -; CHECK-GI-NEXT: mov v27.s[2], w11 -; CHECK-GI-NEXT: smov w11, v22.b[4] -; CHECK-GI-NEXT: fmov s28, w15 -; CHECK-GI-NEXT: mov v25.s[1], w13 -; CHECK-GI-NEXT: smov w13, v26.b[14] -; CHECK-GI-NEXT: mov v29.s[1], w14 -; CHECK-GI-NEXT: smov w15, v22.b[12] -; CHECK-GI-NEXT: smov w14, v22.b[2] ; CHECK-GI-NEXT: mov v3.s[3], wzr -; CHECK-GI-NEXT: mov v28.s[1], w10 -; CHECK-GI-NEXT: smov w10, v22.b[13] ; CHECK-GI-NEXT: mov v2.s[3], wzr -; CHECK-GI-NEXT: fmov s30, w11 -; CHECK-GI-NEXT: smov w11, v22.b[6] -; CHECK-GI-NEXT: mov v27.s[3], w8 -; CHECK-GI-NEXT: mov v25.s[2], w12 -; CHECK-GI-NEXT: mov v29.s[2], w13 -; CHECK-GI-NEXT: smov w13, v26.b[11] -; CHECK-GI-NEXT: fmov s31, w15 -; CHECK-GI-NEXT: smov w15, v26.b[15] -; CHECK-GI-NEXT: smov w12, v22.b[9] -; CHECK-GI-NEXT: mov v30.s[1], w9 -; CHECK-GI-NEXT: smov w9, v22.b[8] -; 
CHECK-GI-NEXT: mov v28.s[2], w14 -; CHECK-GI-NEXT: ldrsb w14, [x1, #32] -; CHECK-GI-NEXT: smov w8, v22.b[15] -; CHECK-GI-NEXT: mul v17.4s, v17.4s, v27.4s -; CHECK-GI-NEXT: mov v31.s[1], w10 -; CHECK-GI-NEXT: smov w10, v22.b[14] -; CHECK-GI-NEXT: mov v25.s[3], w13 -; CHECK-GI-NEXT: ldrsb w13, [x0, #32] -; CHECK-GI-NEXT: mov v29.s[3], w15 +; CHECK-GI-NEXT: mov v5.s[3], wzr ; CHECK-GI-NEXT: mov v4.s[3], wzr -; CHECK-GI-NEXT: mov v30.s[2], w11 -; CHECK-GI-NEXT: fmov s26, w9 -; CHECK-GI-NEXT: smov w9, v22.b[7] -; CHECK-GI-NEXT: smov w11, v22.b[3] -; CHECK-GI-NEXT: add v5.4s, v5.4s, v6.4s -; CHECK-GI-NEXT: mla v17.4s, v7.4s, v24.4s -; CHECK-GI-NEXT: mov v31.s[2], w10 +; CHECK-GI-NEXT: mov v6.s[3], wzr +; CHECK-GI-NEXT: mov v18.s[1], wzr +; CHECK-GI-NEXT: mov v16.s[3], w8 +; CHECK-GI-NEXT: mov v17.s[3], w10 +; CHECK-GI-NEXT: mov v19.s[3], w11 +; CHECK-GI-NEXT: mov v20.s[3], w12 +; CHECK-GI-NEXT: mov v21.s[3], w13 +; CHECK-GI-NEXT: mov v22.s[3], w14 +; CHECK-GI-NEXT: mov v23.s[3], w15 +; CHECK-GI-NEXT: mov v0.s[3], wzr ; CHECK-GI-NEXT: add v1.4s, v1.4s, v3.4s -; CHECK-GI-NEXT: mov v26.s[1], w12 -; CHECK-GI-NEXT: smov w12, v22.b[10] -; CHECK-GI-NEXT: mul v19.4s, v19.4s, v29.4s -; CHECK-GI-NEXT: mov v30.s[3], w9 -; CHECK-GI-NEXT: mul w9, w14, w13 -; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s -; CHECK-GI-NEXT: mov v28.s[3], w11 -; CHECK-GI-NEXT: add v0.4s, v0.4s, v5.4s -; CHECK-GI-NEXT: mov v31.s[3], w8 -; CHECK-GI-NEXT: smov w8, v22.b[11] -; CHECK-GI-NEXT: fmov s8, w9 -; CHECK-GI-NEXT: mov v26.s[2], w12 -; CHECK-GI-NEXT: mla v19.4s, v16.4s, v25.4s -; CHECK-GI-NEXT: mul v20.4s, v20.4s, v30.4s -; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s -; CHECK-GI-NEXT: mov v8.s[1], wzr -; CHECK-GI-NEXT: mul v22.4s, v23.4s, v31.4s -; CHECK-GI-NEXT: mov v26.s[3], w8 -; CHECK-GI-NEXT: add v3.4s, v17.4s, v19.4s -; CHECK-GI-NEXT: mla v20.4s, v18.4s, v28.4s -; CHECK-GI-NEXT: mov v8.s[2], wzr -; CHECK-GI-NEXT: mla v22.4s, v21.4s, v26.4s -; CHECK-GI-NEXT: mov v8.s[3], wzr -; CHECK-GI-NEXT: add v4.4s, v20.4s, v22.4s -; CHECK-GI-NEXT: add v0.4s, v8.4s, v0.4s -; CHECK-GI-NEXT: add v2.4s, v3.4s, v4.4s +; CHECK-GI-NEXT: add v2.4s, v2.4s, v5.4s +; CHECK-GI-NEXT: add v3.4s, v4.4s, v6.4s +; CHECK-GI-NEXT: mov v18.s[2], wzr +; CHECK-GI-NEXT: add v4.4s, v7.4s, v16.4s +; CHECK-GI-NEXT: add v5.4s, v17.4s, v19.4s +; CHECK-GI-NEXT: add v6.4s, v20.4s, v21.4s +; CHECK-GI-NEXT: add v7.4s, v22.4s, v23.4s ; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s -; CHECK-GI-NEXT: add v0.4s, v2.4s, v0.4s +; CHECK-GI-NEXT: add v1.4s, v2.4s, v3.4s +; CHECK-GI-NEXT: mov v18.s[3], wzr +; CHECK-GI-NEXT: add v2.4s, v4.4s, v5.4s +; CHECK-GI-NEXT: add v3.4s, v6.4s, v7.4s +; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s +; CHECK-GI-NEXT: add v1.4s, v2.4s, v3.4s +; CHECK-GI-NEXT: add v0.4s, v18.4s, v0.4s +; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s ; CHECK-GI-NEXT: addv s0, v0.4s ; CHECK-GI-NEXT: fmov w8, s0 -; CHECK-GI-NEXT: add w0, w8, w2 -; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload +; CHECK-GI-NEXT: add w0, w8, w9 +; CHECK-GI-NEXT: add sp, sp, #112 ; CHECK-GI-NEXT: ret entry: %0 = load <33 x i8>, ptr %a @@ -4845,13 +5844,12 @@ define i32 @test_sdot_v33i8_double(<33 x i8> %a, <33 x i8> %b, <33 x i8> %c, <33 ; ; CHECK-GI-LABEL: test_sdot_v33i8_double: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: sub sp, sp, #96 -; CHECK-GI-NEXT: stp d15, d14, [sp, #16] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp d13, d12, [sp, #32] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp d11, d10, [sp, #48] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded 
Spill -; CHECK-GI-NEXT: str x29, [sp, #80] // 8-byte Folded Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 96 +; CHECK-GI-NEXT: stp d15, d14, [sp, #-80]! // 16-byte Folded Spill +; CHECK-GI-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: str x29, [sp, #64] // 8-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 80 ; CHECK-GI-NEXT: .cfi_offset w29, -16 ; CHECK-GI-NEXT: .cfi_offset b8, -24 ; CHECK-GI-NEXT: .cfi_offset b9, -32 @@ -4861,508 +5859,762 @@ define i32 @test_sdot_v33i8_double(<33 x i8> %a, <33 x i8> %b, <33 x i8> %c, <33 ; CHECK-GI-NEXT: .cfi_offset b13, -64 ; CHECK-GI-NEXT: .cfi_offset b14, -72 ; CHECK-GI-NEXT: .cfi_offset b15, -80 -; CHECK-GI-NEXT: sxtb w8, w0 -; CHECK-GI-NEXT: sxtb w9, w1 -; CHECK-GI-NEXT: sxtb w10, w2 -; CHECK-GI-NEXT: sxtb w11, w4 -; CHECK-GI-NEXT: sxtb w12, w5 -; CHECK-GI-NEXT: sxtb w13, w7 -; CHECK-GI-NEXT: fmov s28, w8 +; CHECK-GI-NEXT: lsl w8, w0, #8 +; CHECK-GI-NEXT: ldr w10, [sp, #80] +; CHECK-GI-NEXT: lsl w11, w1, #8 +; CHECK-GI-NEXT: ldr w9, [sp, #88] +; CHECK-GI-NEXT: ldr w13, [sp, #128] +; CHECK-GI-NEXT: ldr w14, [sp, #136] +; CHECK-GI-NEXT: sbfx w12, w8, #8, #8 +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: sbfx w8, w11, #8, #8 +; CHECK-GI-NEXT: lsl w9, w9, #8 +; CHECK-GI-NEXT: lsl w11, w2, #8 +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: fmov s22, w12 +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: ldr w12, [sp, #152] +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: lsl w16, w7, #8 +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: fmov s23, w10 +; CHECK-GI-NEXT: sbfx w10, w11, #8, #8 +; CHECK-GI-NEXT: lsl w11, w3, #8 +; CHECK-GI-NEXT: mov v22.h[1], w8 ; CHECK-GI-NEXT: ldr w8, [sp, #96] -; CHECK-GI-NEXT: fmov s0, wzr -; CHECK-GI-NEXT: fmov s25, w11 -; CHECK-GI-NEXT: sxtb w11, w6 -; CHECK-GI-NEXT: ldr w14, [sp, #528] -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: fmov s18, wzr -; CHECK-GI-NEXT: fmov s20, wzr -; CHECK-GI-NEXT: mov v28.s[1], w9 +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: sbfx w16, w16, #8, #8 +; CHECK-GI-NEXT: ldr w15, [sp, #176] +; CHECK-GI-NEXT: lsl w8, w8, #8 +; CHECK-GI-NEXT: mov v23.h[1], w9 ; CHECK-GI-NEXT: ldr w9, [sp, #104] -; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-GI-NEXT: fmov s24, w8 +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: lsl w15, w15, #8 +; CHECK-GI-NEXT: ldr w17, [sp, #224] +; CHECK-GI-NEXT: mov v22.h[2], w10 +; CHECK-GI-NEXT: sbfx w8, w8, #8, #8 +; CHECK-GI-NEXT: sbfx w10, w11, #8, #8 +; CHECK-GI-NEXT: lsl w9, w9, #8 +; CHECK-GI-NEXT: lsl w11, w4, #8 +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: mov v23.h[2], w8 ; CHECK-GI-NEXT: ldr w8, [sp, #112] -; CHECK-GI-NEXT: mov v25.s[1], w12 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: ldr w12, [sp, #136] -; CHECK-GI-NEXT: mov v18.s[1], wzr -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: mov v20.s[1], wzr ; CHECK-GI-NEXT: fmov s19, wzr -; CHECK-GI-NEXT: mov v28.s[2], w10 -; CHECK-GI-NEXT: sxtb w10, w3 -; CHECK-GI-NEXT: mov v24.s[1], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #128] -; CHECK-GI-NEXT: mov v25.s[2], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #168] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: mov v18.s[2], wzr +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 ; CHECK-GI-NEXT: fmov s21, wzr -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: sxtb w11, w11 -; 
CHECK-GI-NEXT: mov v20.s[2], wzr -; CHECK-GI-NEXT: mov v28.s[3], w10 -; CHECK-GI-NEXT: ldr w10, [sp, #160] -; CHECK-GI-NEXT: mov v24.s[2], w8 -; CHECK-GI-NEXT: ldr w8, [sp, #120] -; CHECK-GI-NEXT: fmov s30, w9 -; CHECK-GI-NEXT: ldr w9, [sp, #144] -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: mov v25.s[3], w13 -; CHECK-GI-NEXT: ldr w13, [sp, #200] -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: mov v19.s[1], wzr -; CHECK-GI-NEXT: fmov s22, w10 -; CHECK-GI-NEXT: mov v30.s[1], w12 -; CHECK-GI-NEXT: ldr w10, [sp, #176] -; CHECK-GI-NEXT: mov v24.s[3], w8 -; CHECK-GI-NEXT: ldr w8, [sp, #224] -; CHECK-GI-NEXT: ldr w12, [sp, #152] -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: mov v21.s[1], wzr -; CHECK-GI-NEXT: mov v22.s[1], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #192] -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: mov v30.s[2], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #232] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: fmov s23, w8 -; CHECK-GI-NEXT: ldr w8, [sp, #240] -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: mov v18.s[3], wzr -; CHECK-GI-NEXT: mov v20.s[3], wzr -; CHECK-GI-NEXT: mov v22.s[2], w10 -; CHECK-GI-NEXT: ldr w10, [sp, #184] -; CHECK-GI-NEXT: fmov s26, w11 -; CHECK-GI-NEXT: mov v23.s[1], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #256] -; CHECK-GI-NEXT: ldr w11, [sp, #208] -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: mov v30.s[3], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #264] -; CHECK-GI-NEXT: mov v26.s[1], w13 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: mov v22.s[3], w10 -; CHECK-GI-NEXT: ldr w10, [sp, #296] -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: fmov s29, w9 -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: ldr w13, [sp, #216] -; CHECK-GI-NEXT: sxtb w9, w10 -; CHECK-GI-NEXT: mov v23.s[2], w8 -; CHECK-GI-NEXT: ldr w8, [sp, #248] -; CHECK-GI-NEXT: mov v26.s[2], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #304] -; CHECK-GI-NEXT: ldr w10, [sp, #272] -; CHECK-GI-NEXT: fmov s31, w9 -; CHECK-GI-NEXT: mov v29.s[1], w12 -; CHECK-GI-NEXT: ldr w9, [sp, #312] -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: ldr w12, [sp, #280] +; CHECK-GI-NEXT: mov v22.h[3], w10 +; CHECK-GI-NEXT: ldr w10, [sp, #144] +; CHECK-GI-NEXT: lsl w8, w8, #8 ; CHECK-GI-NEXT: fmov s16, wzr -; CHECK-GI-NEXT: mov v31.s[1], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #328] -; CHECK-GI-NEXT: mov v23.s[3], w8 -; CHECK-GI-NEXT: sxtb w8, w9 -; CHECK-GI-NEXT: ldr w9, [sp, #360] -; CHECK-GI-NEXT: mov v29.s[2], w10 -; CHECK-GI-NEXT: sxtb w10, w11 -; CHECK-GI-NEXT: mov v26.s[3], w13 -; CHECK-GI-NEXT: ldr w13, [sp, #336] -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: ldr w11, [sp, #368] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: mov v31.s[2], w8 -; CHECK-GI-NEXT: fmov s0, w10 -; CHECK-GI-NEXT: ldr w10, [sp, #320] -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: fmov s12, w9 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: mov v29.s[3], w12 -; CHECK-GI-NEXT: ldr w9, [sp, #376] -; CHECK-GI-NEXT: mov v0.s[1], w13 -; CHECK-GI-NEXT: ldr w13, [sp, #344] -; CHECK-GI-NEXT: ldr w8, [sp, #288] -; CHECK-GI-NEXT: mov v12.s[1], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #392] -; CHECK-GI-NEXT: mov v31.s[3], w10 -; CHECK-GI-NEXT: ldr w10, [sp, #424] -; CHECK-GI-NEXT: sxtb w12, w13 -; CHECK-GI-NEXT: ldr w13, [sp, #400] -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: 
sxtb w8, w8 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: mov v0.s[2], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #432] -; CHECK-GI-NEXT: fmov s13, w11 -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: mov v12.s[2], w9 -; CHECK-GI-NEXT: fmov s8, w10 -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: ldr w10, [sp, #440] -; CHECK-GI-NEXT: ldr w11, [sp, #384] -; CHECK-GI-NEXT: ldr w9, [sp, #352] +; CHECK-GI-NEXT: fmov s18, wzr ; CHECK-GI-NEXT: fmov s17, wzr -; CHECK-GI-NEXT: mov v13.s[1], w13 -; CHECK-GI-NEXT: ldr w13, [sp, #408] -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: mov v8.s[1], w12 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: ldr w12, [sp, #456] -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: fmov s3, wzr -; CHECK-GI-NEXT: mov v12.s[3], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #488] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: mov v13.s[2], w13 -; CHECK-GI-NEXT: ldr w13, [sp, #496] -; CHECK-GI-NEXT: mov v0.s[3], w9 -; CHECK-GI-NEXT: mov v8.s[2], w10 -; CHECK-GI-NEXT: ldr w10, [sp, #416] -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: ldr w9, [sp, #464] -; CHECK-GI-NEXT: fmov s14, w12 -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: fmov s9, w11 -; CHECK-GI-NEXT: ldr w11, [sp, #504] -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: ldr w12, [sp, #448] -; CHECK-GI-NEXT: mul v27.4s, v25.4s, v0.4s -; CHECK-GI-NEXT: mov v13.s[3], w10 -; CHECK-GI-NEXT: ldr w10, [sp, #560] -; CHECK-GI-NEXT: sxtb w15, w11 -; CHECK-GI-NEXT: ldr w11, [sp, #568] -; CHECK-GI-NEXT: mov v9.s[1], w13 -; CHECK-GI-NEXT: ldr w13, [sp, #520] -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: mov v14.s[1], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #472] -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: fmov s10, w10 -; CHECK-GI-NEXT: ldr w10, [sp, #552] -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: fmov s15, w13 -; CHECK-GI-NEXT: mov v8.s[3], w12 -; CHECK-GI-NEXT: sxtb w12, w14 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: mov v14.s[2], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #480] -; CHECK-GI-NEXT: mov v10.s[1], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #576] -; CHECK-GI-NEXT: mov v9.s[2], w15 -; CHECK-GI-NEXT: mul w8, w8, w10 -; CHECK-GI-NEXT: mov v15.s[1], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #512] -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: ldr w10, [sp, #584] -; CHECK-GI-NEXT: ldr w13, [sp, #536] -; CHECK-GI-NEXT: mla v27.4s, v28.4s, v31.4s -; CHECK-GI-NEXT: mul v30.4s, v30.4s, v13.4s -; CHECK-GI-NEXT: mov v10.s[2], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #592] -; CHECK-GI-NEXT: fmov s25, w8 -; CHECK-GI-NEXT: mov v14.s[3], w9 -; CHECK-GI-NEXT: sxtb w9, w12 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: sxtb w8, w11 -; CHECK-GI-NEXT: ldr w11, [sp, #624] -; CHECK-GI-NEXT: sxtb w13, w13 -; CHECK-GI-NEXT: mov v9.s[3], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #600] -; CHECK-GI-NEXT: mla v30.4s, v24.4s, v12.4s -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: mov v10.s[3], w10 -; CHECK-GI-NEXT: ldr w10, [sp, #632] -; CHECK-GI-NEXT: fmov s0, w8 -; CHECK-GI-NEXT: ldr w8, [sp, #656] -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: fmov s28, w11 -; CHECK-GI-NEXT: ldr w11, [sp, #688] -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: mov v15.s[2], w13 -; CHECK-GI-NEXT: ldr w13, [sp, #544] -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: mov v0.s[1], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #664] -; CHECK-GI-NEXT: mov v28.s[1], w10 -; CHECK-GI-NEXT: ldr w10, 
[sp, #696] -; CHECK-GI-NEXT: fmov s11, w8 -; CHECK-GI-NEXT: fmov s31, w11 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: sxtb w12, w13 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: ldr w11, [sp, #672] -; CHECK-GI-NEXT: ldr w8, [sp, #616] -; CHECK-GI-NEXT: mov v11.s[1], w9 -; CHECK-GI-NEXT: mov v15.s[3], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #608] -; CHECK-GI-NEXT: mov v31.s[1], w10 -; CHECK-GI-NEXT: ldr w10, [sp, #704] -; CHECK-GI-NEXT: ldr w9, [sp, #640] -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: mul v24.4s, v26.4s, v14.4s -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: mov v11.s[2], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #712] -; CHECK-GI-NEXT: mov v0.s[2], w12 -; CHECK-GI-NEXT: mov v31.s[2], w10 -; CHECK-GI-NEXT: ldr w12, [sp, #648] -; CHECK-GI-NEXT: mov v28.s[2], w9 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: ldr w10, [sp, #720] -; CHECK-GI-NEXT: ldr w9, [sp, #680] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: mul v26.4s, v29.4s, v15.4s -; CHECK-GI-NEXT: mla v24.4s, v22.4s, v8.4s -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: mov v0.s[3], w8 -; CHECK-GI-NEXT: mov v31.s[3], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #784] -; CHECK-GI-NEXT: mov v28.s[3], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #752] -; CHECK-GI-NEXT: fmov s13, w10 -; CHECK-GI-NEXT: ldr w10, [sp, #792] -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: mov v11.s[3], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #760] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: ldr w8, [sp, #728] -; CHECK-GI-NEXT: fmov s14, w11 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: ldr w11, [sp, #744] -; CHECK-GI-NEXT: fmov s12, w12 -; CHECK-GI-NEXT: ldr w12, [sp, #824] -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: mla v26.4s, v23.4s, v9.4s -; CHECK-GI-NEXT: ldr w13, [sp, #984] -; CHECK-GI-NEXT: mov v14.s[1], w10 -; CHECK-GI-NEXT: sxtb w10, w12 -; CHECK-GI-NEXT: mov v13.s[1], w8 -; CHECK-GI-NEXT: mov v12.s[1], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #832] -; CHECK-GI-NEXT: ldr w8, [sp, #736] -; CHECK-GI-NEXT: fmov s29, w10 -; CHECK-GI-NEXT: ldr w12, [sp, #768] -; CHECK-GI-NEXT: ldr w10, [sp, #800] -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: mov v23.h[3], w9 +; CHECK-GI-NEXT: sbfx w8, w8, #8, #8 +; CHECK-GI-NEXT: ldr w9, [sp, #120] +; CHECK-GI-NEXT: fmov s20, wzr ; CHECK-GI-NEXT: fmov s6, wzr -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: sxtb w10, w10 +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: mov v22.h[4], w11 +; CHECK-GI-NEXT: lsl w11, w5, #8 +; CHECK-GI-NEXT: lsl w9, w9, #8 +; CHECK-GI-NEXT: fmov s7, wzr ; CHECK-GI-NEXT: fmov s2, wzr -; CHECK-GI-NEXT: mov v29.s[1], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #840] -; CHECK-GI-NEXT: mov v13.s[2], w8 -; CHECK-GI-NEXT: mov v12.s[2], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #808] -; CHECK-GI-NEXT: mov v14.s[2], w10 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: ldr w8, [sp, #776] -; CHECK-GI-NEXT: ldr w10, [sp, #848] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: fmov s5, wzr +; CHECK-GI-NEXT: fmov s24, w10 +; CHECK-GI-NEXT: mov v23.h[4], w8 +; CHECK-GI-NEXT: ldr w8, [sp, #160] +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: ldr w10, [sp, #168] +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: lsl w8, w8, #8 ; CHECK-GI-NEXT: fmov s4, wzr -; CHECK-GI-NEXT: mov v29.s[2], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #856] -; 
CHECK-GI-NEXT: mov v13.s[3], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #864] -; CHECK-GI-NEXT: mov v14.s[3], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #888] -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: fmov s7, wzr -; CHECK-GI-NEXT: fmov s15, w9 -; CHECK-GI-NEXT: ldr w9, [sp, #920] -; CHECK-GI-NEXT: mov v12.s[3], w8 -; CHECK-GI-NEXT: ldr w8, [sp, #872] -; CHECK-GI-NEXT: mov v29.s[3], w10 -; CHECK-GI-NEXT: ldr w10, [sp, #896] -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: fmov s22, w12 -; CHECK-GI-NEXT: ldr w12, [sp, #928] -; CHECK-GI-NEXT: mov v15.s[1], w11 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: fmov s8, w9 -; CHECK-GI-NEXT: ldr w9, [sp, #952] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: ldr w11, [sp, #904] -; CHECK-GI-NEXT: mov v22.s[1], w10 -; CHECK-GI-NEXT: ldr w10, [sp, #936] -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: mov v19.s[2], wzr -; CHECK-GI-NEXT: mov v21.s[2], wzr -; CHECK-GI-NEXT: mov v15.s[2], w8 -; CHECK-GI-NEXT: ldr w8, [sp, #960] -; CHECK-GI-NEXT: mov v8.s[1], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #880] -; CHECK-GI-NEXT: fmov s23, w9 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: ldr w9, [sp, #944] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: mov v22.s[2], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #912] -; CHECK-GI-NEXT: mov v8.s[2], w10 -; CHECK-GI-NEXT: ldr w10, [sp, #968] -; CHECK-GI-NEXT: mov v23.s[1], w8 -; CHECK-GI-NEXT: mov v15.s[3], w12 -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: sxtb w12, w13 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: add v18.4s, v18.4s, v20.4s -; CHECK-GI-NEXT: mov v22.s[3], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #992] -; CHECK-GI-NEXT: fmov s9, w12 -; CHECK-GI-NEXT: mov v23.s[2], w10 -; CHECK-GI-NEXT: ldr w10, [sp, #1048] -; CHECK-GI-NEXT: ldr w12, [sp, #1056] -; CHECK-GI-NEXT: mul v0.4s, v0.4s, v15.4s -; CHECK-GI-NEXT: sxtb w13, w11 -; CHECK-GI-NEXT: mov v8.s[3], w9 -; CHECK-GI-NEXT: sxtb w11, w10 -; CHECK-GI-NEXT: ldr w9, [sp, #1000] -; CHECK-GI-NEXT: sxtb w12, w12 -; CHECK-GI-NEXT: mov v9.s[1], w13 -; CHECK-GI-NEXT: ldr w10, [sp, #1016] -; CHECK-GI-NEXT: ldr w8, [sp, #816] -; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: fmov s3, wzr +; CHECK-GI-NEXT: mov v24.h[1], w12 +; CHECK-GI-NEXT: lsl w12, w6, #8 +; CHECK-GI-NEXT: mov v22.h[5], w11 +; CHECK-GI-NEXT: sbfx w8, w8, #8, #8 +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: mov v23.h[5], w9 +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: ldr w11, [sp, #184] +; CHECK-GI-NEXT: ldr w9, [sp, #192] +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: fmov s5, wzr ; CHECK-GI-NEXT: fmov s1, wzr +; CHECK-GI-NEXT: mov v24.h[2], w8 +; CHECK-GI-NEXT: mov v22.h[6], w12 +; CHECK-GI-NEXT: ldr w12, [sp, #208] +; CHECK-GI-NEXT: mov v23.h[6], w13 +; CHECK-GI-NEXT: ldr w13, [sp, #216] +; CHECK-GI-NEXT: lsl w9, w9, #8 +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: ldr w8, [sp, #200] +; CHECK-GI-NEXT: fmov s0, wzr +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: mov v19.s[1], wzr +; CHECK-GI-NEXT: mov v24.h[3], w10 +; CHECK-GI-NEXT: sbfx w10, w14, #8, #8 +; CHECK-GI-NEXT: ldr w14, [sp, #280] +; CHECK-GI-NEXT: mov v22.h[7], w16 +; CHECK-GI-NEXT: ldr w16, [sp, #288] +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; 
CHECK-GI-NEXT: mov v23.h[7], w10 +; CHECK-GI-NEXT: lsl w18, w16, #8 +; CHECK-GI-NEXT: fmov s27, w12 +; CHECK-GI-NEXT: ldr w10, [sp, #232] +; CHECK-GI-NEXT: sbfx w16, w14, #8, #8 +; CHECK-GI-NEXT: mov v24.h[4], w15 +; CHECK-GI-NEXT: lsl w15, w11, #8 +; CHECK-GI-NEXT: sbfx w14, w18, #8, #8 +; CHECK-GI-NEXT: ldr w11, [sp, #296] +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: fmov s25, w16 +; CHECK-GI-NEXT: ldr w16, [sp, #344] +; CHECK-GI-NEXT: mov v27.h[1], w13 +; CHECK-GI-NEXT: lsl w13, w17, #8 +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: lsl w16, w16, #8 +; CHECK-GI-NEXT: ldr w12, [sp, #240] +; CHECK-GI-NEXT: sbfx w17, w10, #8, #8 +; CHECK-GI-NEXT: mov v25.h[1], w14 +; CHECK-GI-NEXT: ldr w14, [sp, #352] +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: sbfx w16, w16, #8, #8 +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: mov v24.h[5], w15 +; CHECK-GI-NEXT: mov v27.h[2], w13 +; CHECK-GI-NEXT: lsl w13, w14, #8 +; CHECK-GI-NEXT: ldr w14, [sp, #304] +; CHECK-GI-NEXT: fmov s26, w16 +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: ldr w15, [sp, #248] +; CHECK-GI-NEXT: mov v25.h[2], w11 +; CHECK-GI-NEXT: ldr w11, [sp, #360] +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: mov v24.h[6], w9 +; CHECK-GI-NEXT: lsl w16, w11, #8 +; CHECK-GI-NEXT: mov v26.h[1], w13 +; CHECK-GI-NEXT: mov v27.h[3], w17 +; CHECK-GI-NEXT: sbfx w13, w14, #8, #8 +; CHECK-GI-NEXT: ldr w14, [sp, #312] +; CHECK-GI-NEXT: ldr w17, [sp, #328] +; CHECK-GI-NEXT: sbfx w16, w16, #8, #8 +; CHECK-GI-NEXT: ldr w10, [sp, #256] +; CHECK-GI-NEXT: ldr w11, [sp, #264] +; CHECK-GI-NEXT: mov v25.h[3], w13 +; CHECK-GI-NEXT: ldr w13, [sp, #368] +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: mov v26.h[2], w16 +; CHECK-GI-NEXT: ldr w16, [sp, #320] +; CHECK-GI-NEXT: mov v27.h[4], w12 +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: sbfx w9, w14, #8, #8 +; CHECK-GI-NEXT: lsl w14, w15, #8 +; CHECK-GI-NEXT: lsl w15, w16, #8 +; CHECK-GI-NEXT: ldr w16, [sp, #408] +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: sbfx w12, w13, #8, #8 +; CHECK-GI-NEXT: ldr w13, [sp, #376] +; CHECK-GI-NEXT: mov v25.h[4], w9 +; CHECK-GI-NEXT: sbfx w9, w14, #8, #8 +; CHECK-GI-NEXT: sbfx w14, w15, #8, #8 +; CHECK-GI-NEXT: lsl w15, w16, #8 +; CHECK-GI-NEXT: mov v26.h[3], w12 +; CHECK-GI-NEXT: ldr w12, [sp, #416] +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: lsl w16, w17, #8 +; CHECK-GI-NEXT: mov v27.h[5], w9 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: mov v25.h[5], w14 +; CHECK-GI-NEXT: fmov s29, w15 +; CHECK-GI-NEXT: ldr w14, [sp, #384] +; CHECK-GI-NEXT: ldr w15, [sp, #472] +; CHECK-GI-NEXT: mov v26.h[4], w13 +; CHECK-GI-NEXT: ldr w13, [sp, #424] +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: sbfx w16, w16, #8, #8 +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: lsl w15, w15, #8 +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: mov v29.h[1], w12 +; CHECK-GI-NEXT: ldr w12, [sp, #480] +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; CHECK-GI-NEXT: mov v25.h[6], w16 +; CHECK-GI-NEXT: ldr w16, [sp, #432] +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: mov v26.h[5], w14 +; CHECK-GI-NEXT: ldr w14, [sp, #392] +; CHECK-GI-NEXT: lsl w16, w16, #8 +; CHECK-GI-NEXT: mov v29.h[2], w13 +; CHECK-GI-NEXT: fmov s28, w15 +; 
CHECK-GI-NEXT: ldr w9, [sp, #336] +; CHECK-GI-NEXT: ldr w13, [sp, #488] +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: ldr w15, [sp, #440] +; CHECK-GI-NEXT: sbfx w16, w16, #8, #8 +; CHECK-GI-NEXT: lsl w9, w9, #8 +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: mov v28.h[1], w12 +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; CHECK-GI-NEXT: lsl w15, w15, #8 +; CHECK-GI-NEXT: mov v29.h[3], w16 +; CHECK-GI-NEXT: ldr w16, [sp, #496] +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: ldr w12, [sp, #400] +; CHECK-GI-NEXT: mov v26.h[6], w14 +; CHECK-GI-NEXT: ldr w14, [sp, #448] +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: mov v28.h[2], w13 +; CHECK-GI-NEXT: lsl w16, w16, #8 +; CHECK-GI-NEXT: mov v25.h[7], w9 +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: mov v29.h[4], w15 +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: ldr w13, [sp, #456] +; CHECK-GI-NEXT: ldr w15, [sp, #504] +; CHECK-GI-NEXT: sbfx w16, w16, #8, #8 +; CHECK-GI-NEXT: sbfx w9, w12, #8, #8 +; CHECK-GI-NEXT: sbfx w12, w14, #8, #8 +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: lsl w14, w15, #8 +; CHECK-GI-NEXT: mov v28.h[3], w16 +; CHECK-GI-NEXT: ldr w15, [sp, #512] +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: mul v30.8h, v22.8h, v25.8h +; CHECK-GI-NEXT: mov v26.h[7], w9 +; CHECK-GI-NEXT: mov v29.h[5], w12 +; CHECK-GI-NEXT: lsl w8, w8, #8 +; CHECK-GI-NEXT: sbfx w9, w14, #8, #8 +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: sbfx w14, w11, #8, #8 +; CHECK-GI-NEXT: sbfx w11, w13, #8, #8 +; CHECK-GI-NEXT: lsl w13, w15, #8 +; CHECK-GI-NEXT: ldr w17, [sp, #464] +; CHECK-GI-NEXT: sbfx w8, w8, #8, #8 +; CHECK-GI-NEXT: mov v28.h[4], w9 +; CHECK-GI-NEXT: mov v27.h[6], w10 +; CHECK-GI-NEXT: ldr w16, [sp, #520] +; CHECK-GI-NEXT: sbfx w10, w13, #8, #8 +; CHECK-GI-NEXT: smov w13, v30.h[0] +; CHECK-GI-NEXT: mov v24.h[7], w8 +; CHECK-GI-NEXT: lsl w8, w17, #8 +; CHECK-GI-NEXT: mov v29.h[6], w11 +; CHECK-GI-NEXT: mul v26.8h, v23.8h, v26.8h +; CHECK-GI-NEXT: lsl w15, w16, #8 +; CHECK-GI-NEXT: smov w16, v30.h[1] +; CHECK-GI-NEXT: ldr w12, [sp, #528] +; CHECK-GI-NEXT: sbfx w8, w8, #8, #8 +; CHECK-GI-NEXT: mov v28.h[5], w10 +; CHECK-GI-NEXT: mov v27.h[7], w14 +; CHECK-GI-NEXT: fmov s22, w13 +; CHECK-GI-NEXT: sbfx w10, w15, #8, #8 +; CHECK-GI-NEXT: smov w14, v30.h[4] +; CHECK-GI-NEXT: mov v29.h[7], w8 +; CHECK-GI-NEXT: smov w15, v26.h[0] +; CHECK-GI-NEXT: smov w13, v30.h[2] +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: ldr w9, [sp, #544] +; CHECK-GI-NEXT: ldr w11, [sp, #552] +; CHECK-GI-NEXT: mov v22.s[1], w16 +; CHECK-GI-NEXT: smov w16, v26.h[4] +; CHECK-GI-NEXT: mov v28.h[6], w10 +; CHECK-GI-NEXT: smov w10, v26.h[1] +; CHECK-GI-NEXT: fmov s23, w14 +; CHECK-GI-NEXT: smov w14, v26.h[5] +; CHECK-GI-NEXT: mul v29.8h, v24.8h, v29.8h +; CHECK-GI-NEXT: fmov s24, w15 +; CHECK-GI-NEXT: smov w15, v26.h[2] +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: smov w8, v30.h[5] +; CHECK-GI-NEXT: smov w17, v30.h[7] +; CHECK-GI-NEXT: fmov s25, w16 +; CHECK-GI-NEXT: mov v22.s[2], w13 +; CHECK-GI-NEXT: smov w13, v30.h[3] +; CHECK-GI-NEXT: mov v24.s[1], w10 +; CHECK-GI-NEXT: smov w16, v26.h[6] +; CHECK-GI-NEXT: sbfx w10, w12, #8, #8 +; CHECK-GI-NEXT: smov w18, v29.h[0] +; CHECK-GI-NEXT: smov w0, v29.h[1] +; CHECK-GI-NEXT: ldr w12, [sp, #560] +; CHECK-GI-NEXT: mov v25.s[1], w14 +; CHECK-GI-NEXT: smov w14, v26.h[7] +; CHECK-GI-NEXT: mov v28.h[7], w10 +; CHECK-GI-NEXT: mov v22.s[3], w13 +; CHECK-GI-NEXT: smov w13, v26.h[3] +; 
CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: mov v24.s[2], w15 +; CHECK-GI-NEXT: smov w15, v29.h[2] +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: fmov s26, w18 +; CHECK-GI-NEXT: mov v23.s[1], w8 +; CHECK-GI-NEXT: smov w8, v30.h[6] +; CHECK-GI-NEXT: mov v25.s[2], w16 +; CHECK-GI-NEXT: lsl w16, w9, #8 +; CHECK-GI-NEXT: mul v31.8h, v27.8h, v28.8h +; CHECK-GI-NEXT: ldr w10, [sp, #568] +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: ldr w9, [sp, #584] +; CHECK-GI-NEXT: mov v24.s[3], w13 +; CHECK-GI-NEXT: smov w13, v29.h[4] +; CHECK-GI-NEXT: mov v26.s[1], w0 +; CHECK-GI-NEXT: sbfx w16, w16, #8, #8 +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: mov v23.s[2], w8 +; CHECK-GI-NEXT: mov v25.s[3], w14 +; CHECK-GI-NEXT: ldr w14, [sp, #608] +; CHECK-GI-NEXT: ldr w8, [sp, #576] +; CHECK-GI-NEXT: fmov s8, w16 +; CHECK-GI-NEXT: ldr w16, [sp, #616] +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: fmov s27, w13 +; CHECK-GI-NEXT: lsl w13, w14, #8 +; CHECK-GI-NEXT: mov v26.s[2], w15 +; CHECK-GI-NEXT: smov w15, v29.h[5] +; CHECK-GI-NEXT: lsl w16, w16, #8 +; CHECK-GI-NEXT: ldr w14, [sp, #624] +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: mov v8.h[1], w11 +; CHECK-GI-NEXT: lsl w8, w8, #8 +; CHECK-GI-NEXT: sbfx w16, w16, #8, #8 +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: mov v23.s[3], w17 +; CHECK-GI-NEXT: fmov s9, w13 +; CHECK-GI-NEXT: ldr w13, [sp, #632] +; CHECK-GI-NEXT: smov w17, v31.h[1] +; CHECK-GI-NEXT: mov v27.s[1], w15 +; CHECK-GI-NEXT: smov w15, v31.h[0] +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; CHECK-GI-NEXT: mov v8.h[2], w12 +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: sbfx w8, w8, #8, #8 +; CHECK-GI-NEXT: mov v9.h[1], w16 +; CHECK-GI-NEXT: smov w16, v31.h[2] +; CHECK-GI-NEXT: lsl w9, w9, #8 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: ldr w11, [sp, #592] +; CHECK-GI-NEXT: ldr w12, [sp, #600] +; CHECK-GI-NEXT: fmov s28, w15 +; CHECK-GI-NEXT: smov w15, v29.h[6] +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: mov v8.h[3], w10 +; CHECK-GI-NEXT: ldr w10, [sp, #640] +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: mov v9.h[2], w14 +; CHECK-GI-NEXT: ldr w14, [sp, #672] +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: mov v28.s[1], w17 +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: mov v27.s[2], w15 +; CHECK-GI-NEXT: ldr w15, [sp, #680] +; CHECK-GI-NEXT: mov v8.h[4], w8 +; CHECK-GI-NEXT: smov w8, v31.h[4] +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; CHECK-GI-NEXT: lsl w15, w15, #8 +; CHECK-GI-NEXT: mov v9.h[3], w13 +; CHECK-GI-NEXT: ldr w13, [sp, #688] +; CHECK-GI-NEXT: mov v28.s[2], w16 +; CHECK-GI-NEXT: ldr w16, [sp, #648] +; CHECK-GI-NEXT: fmov s10, w14 +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: ldr w14, [sp, #656] +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: fmov s30, w8 +; CHECK-GI-NEXT: sbfx w8, w10, #8, #8 +; CHECK-GI-NEXT: smov w10, v31.h[5] +; CHECK-GI-NEXT: mov v8.h[5], w9 +; CHECK-GI-NEXT: ldr w9, [sp, #696] +; CHECK-GI-NEXT: mov v10.h[1], w15 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: mov v9.h[4], w8 +; CHECK-GI-NEXT: lsl w16, w16, #8 +; CHECK-GI-NEXT: lsl w9, w9, #8 +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: ldr w8, [sp, #704] +; CHECK-GI-NEXT: ldr w15, [sp, #664] +; CHECK-GI-NEXT: ldr w17, [sp, #768] +; CHECK-GI-NEXT: mov v30.s[1], w10 +; CHECK-GI-NEXT: ldr w10, [sp, #744] +; CHECK-GI-NEXT: sbfx w16, w16, #8, #8 +; CHECK-GI-NEXT: 
mov v10.h[2], w13 +; CHECK-GI-NEXT: ldr w13, [sp, #736] +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: mov v9.h[5], w16 +; CHECK-GI-NEXT: mov v8.h[6], w11 +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: ldr w11, [sp, #712] +; CHECK-GI-NEXT: lsl w8, w8, #8 +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: ldr w16, [sp, #720] +; CHECK-GI-NEXT: lsl w15, w15, #8 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: mov v10.h[3], w9 +; CHECK-GI-NEXT: ldr w9, [sp, #752] +; CHECK-GI-NEXT: mov v8.h[7], w12 +; CHECK-GI-NEXT: sbfx w12, w8, #8, #8 +; CHECK-GI-NEXT: lsl w18, w16, #8 +; CHECK-GI-NEXT: fmov s11, w13 +; CHECK-GI-NEXT: ldr w13, [sp, #760] +; CHECK-GI-NEXT: ldr w8, [sp, #784] +; CHECK-GI-NEXT: mov v21.s[1], wzr ; CHECK-GI-NEXT: mov v16.s[1], wzr -; CHECK-GI-NEXT: mla v0.4s, v10.4s, v29.4s -; CHECK-GI-NEXT: fmov s10, w11 -; CHECK-GI-NEXT: sxtb w10, w10 -; CHECK-GI-NEXT: ldr w11, [sp, #1024] -; CHECK-GI-NEXT: mul v20.4s, v11.4s, v8.4s -; CHECK-GI-NEXT: ldr q8, [sp] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov v9.s[2], w9 -; CHECK-GI-NEXT: ldr w9, [sp, #1008] -; CHECK-GI-NEXT: fmov s29, w10 -; CHECK-GI-NEXT: mov v10.s[1], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #1064] -; CHECK-GI-NEXT: sxtb w11, w11 -; CHECK-GI-NEXT: sxtb w9, w9 +; CHECK-GI-NEXT: mov v18.s[1], wzr +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: mov v10.h[4], w12 +; CHECK-GI-NEXT: sbfx w12, w15, #8, #8 +; CHECK-GI-NEXT: mov v11.h[1], w10 +; CHECK-GI-NEXT: sbfx w10, w14, #8, #8 +; CHECK-GI-NEXT: lsl w14, w9, #8 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: ldr w9, [sp, #776] +; CHECK-GI-NEXT: lsl w8, w8, #8 +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; CHECK-GI-NEXT: mov v9.h[6], w10 +; CHECK-GI-NEXT: lsl w10, w11, #8 +; CHECK-GI-NEXT: ldr w11, [sp, #808] +; CHECK-GI-NEXT: lsl w9, w9, #8 +; CHECK-GI-NEXT: sbfx w8, w8, #8, #8 +; CHECK-GI-NEXT: mov v11.h[2], w14 +; CHECK-GI-NEXT: ldr w14, [sp, #816] +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 ; CHECK-GI-NEXT: mov v17.s[1], wzr -; CHECK-GI-NEXT: mov v3.s[1], wzr -; CHECK-GI-NEXT: sxtb w12, w12 +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: mov v9.h[7], w12 +; CHECK-GI-NEXT: ldr w12, [sp, #824] +; CHECK-GI-NEXT: sbfx w16, w11, #8, #8 +; CHECK-GI-NEXT: mov v10.h[5], w10 +; CHECK-GI-NEXT: ldr w10, [sp, #832] +; CHECK-GI-NEXT: mov v11.h[3], w13 +; CHECK-GI-NEXT: sbfx w15, w14, #8, #8 +; CHECK-GI-NEXT: lsl w14, w17, #8 +; CHECK-GI-NEXT: fmov s12, w16 +; CHECK-GI-NEXT: ldr w16, [sp, #872] +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; CHECK-GI-NEXT: ldr w11, [sp, #840] +; CHECK-GI-NEXT: sbfx w13, w18, #8, #8 +; CHECK-GI-NEXT: sbfx w17, w12, #8, #8 +; CHECK-GI-NEXT: lsl w16, w16, #8 +; CHECK-GI-NEXT: ldr w12, [sp, #856] +; CHECK-GI-NEXT: mov v12.h[1], w15 +; CHECK-GI-NEXT: mov v11.h[4], w14 +; CHECK-GI-NEXT: ldr w15, [sp, #880] +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: mov v10.h[6], w13 +; CHECK-GI-NEXT: ldr w13, [sp, #848] +; CHECK-GI-NEXT: lsl w14, w15, #8 +; CHECK-GI-NEXT: sbfx w15, w16, #8, #8 +; CHECK-GI-NEXT: ldr w16, [sp, #888] +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: mov v20.s[1], wzr +; CHECK-GI-NEXT: mov v12.h[2], w17 +; CHECK-GI-NEXT: lsl w17, w10, #8 +; CHECK-GI-NEXT: mov v11.h[5], w9 +; CHECK-GI-NEXT: fmov s13, w15 +; CHECK-GI-NEXT: ldr w9, [sp, #936] +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; CHECK-GI-NEXT: sbfx w15, w17, #8, #8 +; 
CHECK-GI-NEXT: lsl w16, w16, #8 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: lsl w9, w9, #8 +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: ldr w10, [sp, #864] +; CHECK-GI-NEXT: mov v12.h[3], w15 +; CHECK-GI-NEXT: mov v11.h[6], w8 +; CHECK-GI-NEXT: sbfx w8, w11, #8, #8 +; CHECK-GI-NEXT: ldr w11, [sp, #1000] +; CHECK-GI-NEXT: mov v13.h[1], w14 +; CHECK-GI-NEXT: ldr w15, [sp, #944] +; CHECK-GI-NEXT: sbfx w9, w9, #8, #8 +; CHECK-GI-NEXT: ldr w14, [sp, #896] +; CHECK-GI-NEXT: sbfx w16, w16, #8, #8 +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: lsl w15, w15, #8 +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: mov v12.h[4], w8 +; CHECK-GI-NEXT: ldr w8, [sp, #1008] +; CHECK-GI-NEXT: fmov s14, w9 +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: mov v13.h[2], w16 +; CHECK-GI-NEXT: ldr w16, [sp, #952] +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: lsl w17, w8, #8 +; CHECK-GI-NEXT: smov w8, v29.h[3] +; CHECK-GI-NEXT: smov w9, v29.h[7] +; CHECK-GI-NEXT: fmov s29, w11 +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; CHECK-GI-NEXT: mov v14.h[1], w15 +; CHECK-GI-NEXT: sbfx w15, w17, #8, #8 +; CHECK-GI-NEXT: ldr w11, [sp, #904] +; CHECK-GI-NEXT: lsl w16, w16, #8 +; CHECK-GI-NEXT: mov v12.h[5], w13 +; CHECK-GI-NEXT: mov v13.h[3], w14 +; CHECK-GI-NEXT: mov v29.h[1], w15 +; CHECK-GI-NEXT: ldr w15, [sp, #960] +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: sbfx w16, w16, #8, #8 +; CHECK-GI-NEXT: ldr w14, [sp, #1016] +; CHECK-GI-NEXT: lsl w15, w15, #8 +; CHECK-GI-NEXT: ldr w13, [sp, #1024] +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: mov v14.h[2], w16 +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: ldr w16, [sp, #912] +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: mov v13.h[4], w11 +; CHECK-GI-NEXT: ldr w11, [sp, #968] +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; CHECK-GI-NEXT: mov v12.h[6], w12 +; CHECK-GI-NEXT: ldr w12, [sp, #976] +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: mov v14.h[3], w15 +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: mov v29.h[2], w14 +; CHECK-GI-NEXT: ldr w15, [sp, #1032] +; CHECK-GI-NEXT: lsl w16, w16, #8 +; CHECK-GI-NEXT: lsl w12, w12, #8 +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: ldr w14, [sp, #920] +; CHECK-GI-NEXT: mov v26.s[3], w8 +; CHECK-GI-NEXT: sbfx w16, w16, #8, #8 +; CHECK-GI-NEXT: lsl w15, w15, #8 +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: mov v14.h[4], w11 +; CHECK-GI-NEXT: mov v29.h[3], w13 +; CHECK-GI-NEXT: ldr w11, [sp, #984] +; CHECK-GI-NEXT: lsl w14, w14, #8 +; CHECK-GI-NEXT: sbfx w15, w15, #8, #8 +; CHECK-GI-NEXT: mov v13.h[5], w16 +; CHECK-GI-NEXT: ldr w16, [sp, #1040] +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: ldr w13, [sp, #928] +; CHECK-GI-NEXT: sbfx w14, w14, #8, #8 +; CHECK-GI-NEXT: mov v12.h[7], w10 +; CHECK-GI-NEXT: mov v27.s[3], w9 +; CHECK-GI-NEXT: mov v14.h[5], w12 +; CHECK-GI-NEXT: mov v29.h[4], w15 +; CHECK-GI-NEXT: lsl w16, w16, #8 +; CHECK-GI-NEXT: sbfx w10, w11, #8, #8 +; CHECK-GI-NEXT: lsl w13, w13, #8 +; CHECK-GI-NEXT: mov v13.h[6], w14 +; CHECK-GI-NEXT: ldr w12, [sp, #1048] +; CHECK-GI-NEXT: sbfx w14, w16, #8, #8 +; CHECK-GI-NEXT: ldr w11, [sp, #728] +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: mul v15.8h, v8.8h, v12.8h +; CHECK-GI-NEXT: smov w16, v31.h[6] +; CHECK-GI-NEXT: mov v14.h[6], w10 +; CHECK-GI-NEXT: ldr w10, [sp, #992] +; CHECK-GI-NEXT: mov v29.h[5], w14 +; CHECK-GI-NEXT: lsl w12, 
w12, #8 +; CHECK-GI-NEXT: lsl w11, w11, #8 +; CHECK-GI-NEXT: mov v13.h[7], w13 +; CHECK-GI-NEXT: lsl w10, w10, #8 +; CHECK-GI-NEXT: ldr w13, [sp, #792] +; CHECK-GI-NEXT: ldr w14, [sp, #1056] +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: sbfx w11, w11, #8, #8 +; CHECK-GI-NEXT: mov v30.s[2], w16 +; CHECK-GI-NEXT: sbfx w10, w10, #8, #8 +; CHECK-GI-NEXT: smov w8, v15.h[1] +; CHECK-GI-NEXT: smov w9, v15.h[5] +; CHECK-GI-NEXT: mov v29.h[6], w12 +; CHECK-GI-NEXT: lsl w12, w13, #8 +; CHECK-GI-NEXT: lsl w13, w14, #8 +; CHECK-GI-NEXT: mov v10.h[7], w11 +; CHECK-GI-NEXT: mov v14.h[7], w10 +; CHECK-GI-NEXT: mul v12.8h, v9.8h, v13.8h +; CHECK-GI-NEXT: sbfx w12, w12, #8, #8 +; CHECK-GI-NEXT: sbfx w13, w13, #8, #8 +; CHECK-GI-NEXT: smov w10, v15.h[0] +; CHECK-GI-NEXT: smov w11, v15.h[4] +; CHECK-GI-NEXT: smov w14, v31.h[7] +; CHECK-GI-NEXT: smov w15, v31.h[3] +; CHECK-GI-NEXT: mov v11.h[7], w12 +; CHECK-GI-NEXT: mov v29.h[7], w13 ; CHECK-GI-NEXT: mov v6.s[1], wzr +; CHECK-GI-NEXT: mul v13.8h, v10.8h, v14.8h +; CHECK-GI-NEXT: smov w12, v12.h[0] +; CHECK-GI-NEXT: smov w13, v12.h[1] +; CHECK-GI-NEXT: mov v7.s[1], wzr ; CHECK-GI-NEXT: mov v2.s[1], wzr -; CHECK-GI-NEXT: mov v5.s[1], wzr ; CHECK-GI-NEXT: mov v4.s[1], wzr -; CHECK-GI-NEXT: mov v7.s[1], wzr -; CHECK-GI-NEXT: mov v10.s[2], w12 -; CHECK-GI-NEXT: ldr w12, [sp, #1080] -; CHECK-GI-NEXT: mov v8.s[1], wzr -; CHECK-GI-NEXT: mov v9.s[3], w9 -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: ldr w10, [sp, #1032] -; CHECK-GI-NEXT: sxtb w9, w12 -; CHECK-GI-NEXT: mov v29.s[1], w11 -; CHECK-GI-NEXT: ldr w11, [sp, #1072] -; CHECK-GI-NEXT: mov v19.s[3], wzr -; CHECK-GI-NEXT: mov v21.s[3], wzr +; CHECK-GI-NEXT: fmov s31, w11 +; CHECK-GI-NEXT: mov v30.s[3], w14 +; CHECK-GI-NEXT: smov w11, v12.h[4] +; CHECK-GI-NEXT: mul v14.8h, v11.8h, v29.8h +; CHECK-GI-NEXT: fmov s29, w10 +; CHECK-GI-NEXT: smov w10, v15.h[2] +; CHECK-GI-NEXT: smov w14, v13.h[0] +; CHECK-GI-NEXT: fmov s8, w12 +; CHECK-GI-NEXT: smov w16, v13.h[1] +; CHECK-GI-NEXT: mov v31.s[1], w9 +; CHECK-GI-NEXT: smov w9, v12.h[2] +; CHECK-GI-NEXT: mov v28.s[3], w15 +; CHECK-GI-NEXT: mov v29.s[1], w8 +; CHECK-GI-NEXT: smov w8, v15.h[6] +; CHECK-GI-NEXT: smov w15, v12.h[5] +; CHECK-GI-NEXT: mov v8.s[1], w13 +; CHECK-GI-NEXT: fmov s9, w11 +; CHECK-GI-NEXT: smov w12, v15.h[3] +; CHECK-GI-NEXT: fmov s10, w14 +; CHECK-GI-NEXT: smov w14, v13.h[2] +; CHECK-GI-NEXT: smov w11, v12.h[6] +; CHECK-GI-NEXT: smov w13, v15.h[7] +; CHECK-GI-NEXT: mov v3.s[1], wzr +; CHECK-GI-NEXT: mov v5.s[1], wzr +; CHECK-GI-NEXT: mov v31.s[2], w8 +; CHECK-GI-NEXT: smov w8, v13.h[4] +; CHECK-GI-NEXT: mov v29.s[2], w10 +; CHECK-GI-NEXT: mov v10.s[1], w16 +; CHECK-GI-NEXT: smov w16, v14.h[0] +; CHECK-GI-NEXT: mov v8.s[2], w9 +; CHECK-GI-NEXT: smov w9, v13.h[5] +; CHECK-GI-NEXT: smov w10, v12.h[3] +; CHECK-GI-NEXT: mov v9.s[1], w15 +; CHECK-GI-NEXT: smov w15, v13.h[6] ; CHECK-GI-NEXT: mov v1.s[1], wzr -; CHECK-GI-NEXT: mul w8, w8, w9 +; CHECK-GI-NEXT: mov v0.s[1], wzr +; CHECK-GI-NEXT: fmov s11, w8 +; CHECK-GI-NEXT: smov w8, v14.h[1] +; CHECK-GI-NEXT: mov v29.s[3], w12 +; CHECK-GI-NEXT: mov v10.s[2], w14 +; CHECK-GI-NEXT: smov w14, v12.h[7] +; CHECK-GI-NEXT: fmov s12, w16 +; CHECK-GI-NEXT: smov w12, v14.h[4] +; CHECK-GI-NEXT: mov v8.s[3], w10 +; CHECK-GI-NEXT: ldr w10, [sp, #536] +; CHECK-GI-NEXT: mov v11.s[1], w9 +; CHECK-GI-NEXT: ldr w9, [sp, #272] +; CHECK-GI-NEXT: mov v9.s[2], w11 +; CHECK-GI-NEXT: ldr w11, [sp, #800] +; CHECK-GI-NEXT: mov v12.s[1], w8 +; CHECK-GI-NEXT: ldr w8, [sp, #1064] +; CHECK-GI-NEXT: mov v31.s[3], w13 +; 
CHECK-GI-NEXT: smov w13, v14.h[5] +; CHECK-GI-NEXT: sxtb w9, w9 ; CHECK-GI-NEXT: sxtb w10, w10 ; CHECK-GI-NEXT: sxtb w11, w11 +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: mov v11.s[2], w15 +; CHECK-GI-NEXT: smov w15, v13.h[3] +; CHECK-GI-NEXT: smov w16, v13.h[7] +; CHECK-GI-NEXT: fmov s13, w12 +; CHECK-GI-NEXT: mul w9, w9, w10 +; CHECK-GI-NEXT: smov w12, v14.h[2] +; CHECK-GI-NEXT: mul w8, w11, w8 +; CHECK-GI-NEXT: mov v19.s[2], wzr +; CHECK-GI-NEXT: mov v21.s[2], wzr ; CHECK-GI-NEXT: mov v16.s[2], wzr +; CHECK-GI-NEXT: mov v18.s[2], wzr ; CHECK-GI-NEXT: mov v17.s[2], wzr -; CHECK-GI-NEXT: mov v3.s[2], wzr +; CHECK-GI-NEXT: mov v13.s[1], w13 +; CHECK-GI-NEXT: smov w13, v14.h[6] +; CHECK-GI-NEXT: sxth w9, w9 +; CHECK-GI-NEXT: sxth w10, w8 +; CHECK-GI-NEXT: mov v20.s[2], wzr ; CHECK-GI-NEXT: mov v6.s[2], wzr +; CHECK-GI-NEXT: mov v7.s[2], wzr ; CHECK-GI-NEXT: mov v2.s[2], wzr -; CHECK-GI-NEXT: mov v5.s[2], wzr ; CHECK-GI-NEXT: mov v4.s[2], wzr -; CHECK-GI-NEXT: mov v7.s[2], wzr -; CHECK-GI-NEXT: mov v8.s[2], wzr -; CHECK-GI-NEXT: mov v29.s[2], w10 -; CHECK-GI-NEXT: mov v10.s[3], w11 -; CHECK-GI-NEXT: add v19.4s, v19.4s, v21.4s -; CHECK-GI-NEXT: ldr w9, [sp, #976] -; CHECK-GI-NEXT: fmov s21, w8 -; CHECK-GI-NEXT: ldr w8, [sp, #1040] +; CHECK-GI-NEXT: mov v3.s[2], wzr +; CHECK-GI-NEXT: mov v5.s[2], wzr +; CHECK-GI-NEXT: add v22.4s, v22.4s, v23.4s +; CHECK-GI-NEXT: add v25.4s, v24.4s, v25.4s +; CHECK-GI-NEXT: fmov s23, w9 +; CHECK-GI-NEXT: fmov s24, w10 +; CHECK-GI-NEXT: mov v12.s[2], w12 +; CHECK-GI-NEXT: mov v13.s[2], w13 +; CHECK-GI-NEXT: smov w8, v14.h[3] +; CHECK-GI-NEXT: smov w9, v14.h[7] ; CHECK-GI-NEXT: mov v1.s[2], wzr +; CHECK-GI-NEXT: mov v0.s[2], wzr +; CHECK-GI-NEXT: mov v19.s[3], wzr +; CHECK-GI-NEXT: mov v21.s[3], wzr ; CHECK-GI-NEXT: mov v16.s[3], wzr +; CHECK-GI-NEXT: mov v18.s[3], wzr ; CHECK-GI-NEXT: mov v17.s[3], wzr -; CHECK-GI-NEXT: sxtb w9, w9 -; CHECK-GI-NEXT: sxtb w8, w8 -; CHECK-GI-NEXT: mov v11.16b, v8.16b -; CHECK-GI-NEXT: mov v3.s[3], wzr +; CHECK-GI-NEXT: mov v20.s[3], wzr ; CHECK-GI-NEXT: mov v6.s[3], wzr +; CHECK-GI-NEXT: mov v7.s[3], wzr ; CHECK-GI-NEXT: mov v2.s[3], wzr -; CHECK-GI-NEXT: mov v5.s[3], wzr ; CHECK-GI-NEXT: mov v4.s[3], wzr -; CHECK-GI-NEXT: mov v7.s[3], wzr -; CHECK-GI-NEXT: mov v25.s[1], wzr -; CHECK-GI-NEXT: mov v21.s[1], wzr -; CHECK-GI-NEXT: mul v8.4s, v13.4s, v9.4s -; CHECK-GI-NEXT: mul v9.4s, v14.4s, v10.4s -; CHECK-GI-NEXT: mov v23.s[3], w9 -; CHECK-GI-NEXT: mov v29.s[3], w8 +; CHECK-GI-NEXT: mov v3.s[3], wzr +; CHECK-GI-NEXT: mov v5.s[3], wzr +; CHECK-GI-NEXT: mov v23.s[1], wzr +; CHECK-GI-NEXT: mov v24.s[1], wzr +; CHECK-GI-NEXT: mov v9.s[3], w14 +; CHECK-GI-NEXT: mov v10.s[3], w15 +; CHECK-GI-NEXT: mov v11.s[3], w16 ; CHECK-GI-NEXT: mov v1.s[3], wzr -; CHECK-GI-NEXT: mov v11.s[3], wzr -; CHECK-GI-NEXT: add v16.4s, v16.4s, v17.4s -; CHECK-GI-NEXT: add v3.4s, v3.4s, v6.4s -; CHECK-GI-NEXT: add v2.4s, v2.4s, v5.4s -; CHECK-GI-NEXT: add v4.4s, v4.4s, v7.4s -; CHECK-GI-NEXT: mov v25.s[2], wzr -; CHECK-GI-NEXT: mov v21.s[2], wzr -; CHECK-GI-NEXT: mla v20.4s, v28.4s, v22.4s -; CHECK-GI-NEXT: mla v8.4s, v31.4s, v23.4s -; CHECK-GI-NEXT: mla v9.4s, v12.4s, v29.4s -; CHECK-GI-NEXT: add v5.4s, v19.4s, v16.4s -; CHECK-GI-NEXT: add v1.4s, v1.4s, v18.4s -; CHECK-GI-NEXT: add v3.4s, v11.4s, v3.4s +; CHECK-GI-NEXT: mov v12.s[3], w8 +; CHECK-GI-NEXT: mov v13.s[3], w9 +; CHECK-GI-NEXT: mov v0.s[3], wzr +; CHECK-GI-NEXT: add v19.4s, v19.4s, v21.4s +; CHECK-GI-NEXT: add v16.4s, v16.4s, v18.4s +; CHECK-GI-NEXT: add v17.4s, v17.4s, v20.4s +; CHECK-GI-NEXT: 
add v6.4s, v6.4s, v7.4s ; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s -; CHECK-GI-NEXT: add v4.4s, v27.4s, v30.4s -; CHECK-GI-NEXT: add v6.4s, v24.4s, v26.4s -; CHECK-GI-NEXT: ldr x29, [sp, #80] // 8-byte Folded Reload -; CHECK-GI-NEXT: mov v25.s[3], wzr -; CHECK-GI-NEXT: mov v21.s[3], wzr -; CHECK-GI-NEXT: add v0.4s, v0.4s, v20.4s -; CHECK-GI-NEXT: add v1.4s, v1.4s, v5.4s -; CHECK-GI-NEXT: add v5.4s, v8.4s, v9.4s -; CHECK-GI-NEXT: add v2.4s, v3.4s, v2.4s -; CHECK-GI-NEXT: add v3.4s, v4.4s, v6.4s -; CHECK-GI-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldp d11, d10, [sp, #48] // 16-byte Folded Reload -; CHECK-GI-NEXT: add v1.4s, v25.4s, v1.4s -; CHECK-GI-NEXT: add v0.4s, v0.4s, v5.4s -; CHECK-GI-NEXT: add v2.4s, v21.4s, v2.4s -; CHECK-GI-NEXT: ldp d13, d12, [sp, #32] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldp d15, d14, [sp, #16] // 16-byte Folded Reload -; CHECK-GI-NEXT: add v1.4s, v3.4s, v1.4s +; CHECK-GI-NEXT: add v3.4s, v3.4s, v5.4s +; CHECK-GI-NEXT: mov v23.s[2], wzr +; CHECK-GI-NEXT: mov v24.s[2], wzr +; CHECK-GI-NEXT: add v26.4s, v26.4s, v27.4s +; CHECK-GI-NEXT: add v27.4s, v28.4s, v30.4s +; CHECK-GI-NEXT: add v1.4s, v1.4s, v19.4s +; CHECK-GI-NEXT: add v4.4s, v16.4s, v17.4s +; CHECK-GI-NEXT: add v5.4s, v29.4s, v31.4s +; CHECK-GI-NEXT: add v7.4s, v8.4s, v9.4s +; CHECK-GI-NEXT: add v16.4s, v10.4s, v11.4s +; CHECK-GI-NEXT: add v17.4s, v12.4s, v13.4s +; CHECK-GI-NEXT: add v0.4s, v0.4s, v6.4s +; CHECK-GI-NEXT: add v2.4s, v2.4s, v3.4s +; CHECK-GI-NEXT: mov v23.s[3], wzr +; CHECK-GI-NEXT: mov v24.s[3], wzr +; CHECK-GI-NEXT: add v3.4s, v22.4s, v25.4s +; CHECK-GI-NEXT: add v6.4s, v26.4s, v27.4s +; CHECK-GI-NEXT: add v1.4s, v1.4s, v4.4s +; CHECK-GI-NEXT: add v4.4s, v5.4s, v7.4s +; CHECK-GI-NEXT: add v5.4s, v16.4s, v17.4s ; CHECK-GI-NEXT: add v0.4s, v0.4s, v2.4s +; CHECK-GI-NEXT: ldr x29, [sp, #64] // 8-byte Folded Reload +; CHECK-GI-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: add v2.4s, v3.4s, v6.4s +; CHECK-GI-NEXT: add v1.4s, v23.4s, v1.4s +; CHECK-GI-NEXT: add v3.4s, v4.4s, v5.4s +; CHECK-GI-NEXT: add v0.4s, v24.4s, v0.4s +; CHECK-GI-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: add v1.4s, v2.4s, v1.4s +; CHECK-GI-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s ; CHECK-GI-NEXT: addv s1, v1.4s ; CHECK-GI-NEXT: addv s0, v0.4s ; CHECK-GI-NEXT: fmov w8, s1 ; CHECK-GI-NEXT: fmov w9, s0 ; CHECK-GI-NEXT: add w0, w8, w9 -; CHECK-GI-NEXT: add sp, sp, #96 +; CHECK-GI-NEXT: ldp d15, d14, [sp], #80 // 16-byte Folded Reload ; CHECK-GI-NEXT: ret entry: %az = sext <33 x i8> %a to <33 x i32> diff --git a/llvm/test/CodeGen/AArch64/neon-extmul.ll b/llvm/test/CodeGen/AArch64/neon-extmul.ll index c82f8e1..84b634d 100644 --- a/llvm/test/CodeGen/AArch64/neon-extmul.ll +++ b/llvm/test/CodeGen/AArch64/neon-extmul.ll @@ -12,10 +12,9 @@ define <8 x i32> @extmuls_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1) { ; ; CHECK-GI-LABEL: extmuls_v8i8_i32: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: sshll v2.8h, v0.8b, #0 -; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0 -; CHECK-GI-NEXT: smull v0.4s, v2.4h, v1.4h -; CHECK-GI-NEXT: smull2 v1.4s, v2.8h, v1.8h +; CHECK-GI-NEXT: smull v1.8h, v0.8b, v1.8b +; CHECK-GI-NEXT: sshll v0.4s, v1.4h, #0 +; CHECK-GI-NEXT: sshll2 v1.4s, v1.8h, #0 ; CHECK-GI-NEXT: ret entry: %s0s = sext <8 x i8> %s0 to <8 x i32> @@ -34,10 +33,9 @@ define <8 x i32> @extmulu_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1) { ; ; CHECK-GI-LABEL: extmulu_v8i8_i32: ; CHECK-GI: // %bb.0: // %entry -; 
CHECK-GI-NEXT: ushll v2.8h, v0.8b, #0 -; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0 -; CHECK-GI-NEXT: umull v0.4s, v2.4h, v1.4h -; CHECK-GI-NEXT: umull2 v1.4s, v2.8h, v1.8h +; CHECK-GI-NEXT: umull v1.8h, v0.8b, v1.8b +; CHECK-GI-NEXT: ushll v0.4s, v1.4h, #0 +; CHECK-GI-NEXT: ushll2 v1.4s, v1.8h, #0 ; CHECK-GI-NEXT: ret entry: %s0s = zext <8 x i8> %s0 to <8 x i32> @@ -79,12 +77,9 @@ define <8 x i32> @extmuladds_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1, <8 x i32> %b) ; ; CHECK-GI-LABEL: extmuladds_v8i8_i32: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0 -; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0 -; CHECK-GI-NEXT: smlal v2.4s, v0.4h, v1.4h -; CHECK-GI-NEXT: smlal2 v3.4s, v0.8h, v1.8h -; CHECK-GI-NEXT: mov v0.16b, v2.16b -; CHECK-GI-NEXT: mov v1.16b, v3.16b +; CHECK-GI-NEXT: smull v1.8h, v0.8b, v1.8b +; CHECK-GI-NEXT: saddw v0.4s, v2.4s, v1.4h +; CHECK-GI-NEXT: saddw2 v1.4s, v3.4s, v1.8h ; CHECK-GI-NEXT: ret entry: %s0s = sext <8 x i8> %s0 to <8 x i32> @@ -104,12 +99,9 @@ define <8 x i32> @extmuladdu_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1, <8 x i32> %b) ; ; CHECK-GI-LABEL: extmuladdu_v8i8_i32: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0 -; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0 -; CHECK-GI-NEXT: umlal v2.4s, v0.4h, v1.4h -; CHECK-GI-NEXT: umlal2 v3.4s, v0.8h, v1.8h -; CHECK-GI-NEXT: mov v0.16b, v2.16b -; CHECK-GI-NEXT: mov v1.16b, v3.16b +; CHECK-GI-NEXT: umull v1.8h, v0.8b, v1.8b +; CHECK-GI-NEXT: uaddw v0.4s, v2.4s, v1.4h +; CHECK-GI-NEXT: uaddw2 v1.4s, v3.4s, v1.8h ; CHECK-GI-NEXT: ret entry: %s0s = zext <8 x i8> %s0 to <8 x i32> @@ -163,16 +155,13 @@ define <8 x i64> @extmuls_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1) { ; ; CHECK-GI-LABEL: extmuls_v8i8_i64: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0 -; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0 -; CHECK-GI-NEXT: sshll v2.4s, v0.4h, #0 -; CHECK-GI-NEXT: sshll v3.4s, v1.4h, #0 -; CHECK-GI-NEXT: sshll2 v4.4s, v0.8h, #0 -; CHECK-GI-NEXT: sshll2 v5.4s, v1.8h, #0 -; CHECK-GI-NEXT: smull v0.2d, v2.2s, v3.2s -; CHECK-GI-NEXT: smull2 v1.2d, v2.4s, v3.4s -; CHECK-GI-NEXT: smull v2.2d, v4.2s, v5.2s -; CHECK-GI-NEXT: smull2 v3.2d, v4.4s, v5.4s +; CHECK-GI-NEXT: smull v0.8h, v0.8b, v1.8b +; CHECK-GI-NEXT: sshll v1.4s, v0.4h, #0 +; CHECK-GI-NEXT: sshll2 v3.4s, v0.8h, #0 +; CHECK-GI-NEXT: sshll v0.2d, v1.2s, #0 +; CHECK-GI-NEXT: sshll2 v1.2d, v1.4s, #0 +; CHECK-GI-NEXT: sshll v2.2d, v3.2s, #0 +; CHECK-GI-NEXT: sshll2 v3.2d, v3.4s, #0 ; CHECK-GI-NEXT: ret entry: %s0s = sext <8 x i8> %s0 to <8 x i64> @@ -195,16 +184,13 @@ define <8 x i64> @extmulu_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1) { ; ; CHECK-GI-LABEL: extmulu_v8i8_i64: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0 -; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0 -; CHECK-GI-NEXT: ushll v2.4s, v0.4h, #0 -; CHECK-GI-NEXT: ushll v3.4s, v1.4h, #0 -; CHECK-GI-NEXT: ushll2 v4.4s, v0.8h, #0 -; CHECK-GI-NEXT: ushll2 v5.4s, v1.8h, #0 -; CHECK-GI-NEXT: umull v0.2d, v2.2s, v3.2s -; CHECK-GI-NEXT: umull2 v1.2d, v2.4s, v3.4s -; CHECK-GI-NEXT: umull v2.2d, v4.2s, v5.2s -; CHECK-GI-NEXT: umull2 v3.2d, v4.4s, v5.4s +; CHECK-GI-NEXT: umull v0.8h, v0.8b, v1.8b +; CHECK-GI-NEXT: ushll v1.4s, v0.4h, #0 +; CHECK-GI-NEXT: ushll2 v3.4s, v0.8h, #0 +; CHECK-GI-NEXT: ushll v0.2d, v1.2s, #0 +; CHECK-GI-NEXT: ushll2 v1.2d, v1.4s, #0 +; CHECK-GI-NEXT: ushll v2.2d, v3.2s, #0 +; CHECK-GI-NEXT: ushll2 v3.2d, v3.4s, #0 ; CHECK-GI-NEXT: ret entry: %s0s = zext <8 x i8> %s0 to <8 x i64> @@ -263,20 +249,13 @@ define <8 x i64> @extmuladds_v8i8_i64(<8 x i8> %s0, <8 
x i8> %s1, <8 x i64> %b) ; ; CHECK-GI-LABEL: extmuladds_v8i8_i64: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0 -; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0 -; CHECK-GI-NEXT: sshll v6.4s, v0.4h, #0 -; CHECK-GI-NEXT: sshll v7.4s, v1.4h, #0 -; CHECK-GI-NEXT: sshll2 v0.4s, v0.8h, #0 -; CHECK-GI-NEXT: sshll2 v1.4s, v1.8h, #0 -; CHECK-GI-NEXT: smlal v2.2d, v6.2s, v7.2s -; CHECK-GI-NEXT: smlal2 v3.2d, v6.4s, v7.4s -; CHECK-GI-NEXT: smlal v4.2d, v0.2s, v1.2s -; CHECK-GI-NEXT: smlal2 v5.2d, v0.4s, v1.4s -; CHECK-GI-NEXT: mov v0.16b, v2.16b -; CHECK-GI-NEXT: mov v1.16b, v3.16b -; CHECK-GI-NEXT: mov v2.16b, v4.16b -; CHECK-GI-NEXT: mov v3.16b, v5.16b +; CHECK-GI-NEXT: smull v0.8h, v0.8b, v1.8b +; CHECK-GI-NEXT: sshll v1.4s, v0.4h, #0 +; CHECK-GI-NEXT: sshll2 v6.4s, v0.8h, #0 +; CHECK-GI-NEXT: saddw v0.2d, v2.2d, v1.2s +; CHECK-GI-NEXT: saddw2 v1.2d, v3.2d, v1.4s +; CHECK-GI-NEXT: saddw v2.2d, v4.2d, v6.2s +; CHECK-GI-NEXT: saddw2 v3.2d, v5.2d, v6.4s ; CHECK-GI-NEXT: ret entry: %s0s = sext <8 x i8> %s0 to <8 x i64> @@ -301,20 +280,13 @@ define <8 x i64> @extmuladdu_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1, <8 x i64> %b) ; ; CHECK-GI-LABEL: extmuladdu_v8i8_i64: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0 -; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0 -; CHECK-GI-NEXT: ushll v6.4s, v0.4h, #0 -; CHECK-GI-NEXT: ushll v7.4s, v1.4h, #0 -; CHECK-GI-NEXT: ushll2 v0.4s, v0.8h, #0 -; CHECK-GI-NEXT: ushll2 v1.4s, v1.8h, #0 -; CHECK-GI-NEXT: umlal v2.2d, v6.2s, v7.2s -; CHECK-GI-NEXT: umlal2 v3.2d, v6.4s, v7.4s -; CHECK-GI-NEXT: umlal v4.2d, v0.2s, v1.2s -; CHECK-GI-NEXT: umlal2 v5.2d, v0.4s, v1.4s -; CHECK-GI-NEXT: mov v0.16b, v2.16b -; CHECK-GI-NEXT: mov v1.16b, v3.16b -; CHECK-GI-NEXT: mov v2.16b, v4.16b -; CHECK-GI-NEXT: mov v3.16b, v5.16b +; CHECK-GI-NEXT: umull v0.8h, v0.8b, v1.8b +; CHECK-GI-NEXT: ushll v1.4s, v0.4h, #0 +; CHECK-GI-NEXT: ushll2 v6.4s, v0.8h, #0 +; CHECK-GI-NEXT: uaddw v0.2d, v2.2d, v1.2s +; CHECK-GI-NEXT: uaddw2 v1.2d, v3.2d, v1.4s +; CHECK-GI-NEXT: uaddw v2.2d, v4.2d, v6.2s +; CHECK-GI-NEXT: uaddw2 v3.2d, v5.2d, v6.4s ; CHECK-GI-NEXT: ret entry: %s0s = zext <8 x i8> %s0 to <8 x i64> diff --git a/llvm/test/CodeGen/AArch64/peephole-and-tst.ll b/llvm/test/CodeGen/AArch64/peephole-and-tst.ll index 17ad298..3caac1d 100644 --- a/llvm/test/CodeGen/AArch64/peephole-and-tst.ll +++ b/llvm/test/CodeGen/AArch64/peephole-and-tst.ll @@ -1,40 +1,72 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s +; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI %struct.anon = type { ptr, ptr } @ptr_wrapper = common global ptr null, align 8 define i32 @test_func_i32_two_uses(i32 %in, i32 %bit, i32 %mask) { -; CHECK-LABEL: test_func_i32_two_uses: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: adrp x8, :got:ptr_wrapper -; CHECK-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper] -; CHECK-NEXT: ldr x9, [x8] -; CHECK-NEXT: mov w8, wzr -; CHECK-NEXT: b .LBB0_3 -; CHECK-NEXT: .LBB0_1: // in Loop: Header=BB0_3 Depth=1 -; CHECK-NEXT: str xzr, [x9, #8] -; CHECK-NEXT: .LBB0_2: // in Loop: Header=BB0_3 Depth=1 -; CHECK-NEXT: lsl w1, w1, #1 -; CHECK-NEXT: cbz w1, .LBB0_6 -; CHECK-NEXT: .LBB0_3: // %do.body -; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: ands w10, w1, w0 -; CHECK-NEXT: and w11, w2, w0 -; CHECK-NEXT: cinc w8, w8, ne -; CHECK-NEXT: cmp w10, w11 -; CHECK-NEXT: 
b.eq .LBB0_1 -; CHECK-NEXT: // %bb.4: // %do.body -; CHECK-NEXT: // in Loop: Header=BB0_3 Depth=1 -; CHECK-NEXT: cbnz w2, .LBB0_1 -; CHECK-NEXT: // %bb.5: // %do.body -; CHECK-NEXT: // in Loop: Header=BB0_3 Depth=1 -; CHECK-NEXT: cbz w10, .LBB0_2 -; CHECK-NEXT: b .LBB0_1 -; CHECK-NEXT: .LBB0_6: // %do.end -; CHECK-NEXT: mov w0, w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_func_i32_two_uses: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: adrp x8, :got:ptr_wrapper +; CHECK-SD-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper] +; CHECK-SD-NEXT: ldr x9, [x8] +; CHECK-SD-NEXT: mov w8, wzr +; CHECK-SD-NEXT: b .LBB0_3 +; CHECK-SD-NEXT: .LBB0_1: // in Loop: Header=BB0_3 Depth=1 +; CHECK-SD-NEXT: str xzr, [x9, #8] +; CHECK-SD-NEXT: .LBB0_2: // in Loop: Header=BB0_3 Depth=1 +; CHECK-SD-NEXT: lsl w1, w1, #1 +; CHECK-SD-NEXT: cbz w1, .LBB0_6 +; CHECK-SD-NEXT: .LBB0_3: // %do.body +; CHECK-SD-NEXT: // =>This Inner Loop Header: Depth=1 +; CHECK-SD-NEXT: ands w10, w1, w0 +; CHECK-SD-NEXT: and w11, w2, w0 +; CHECK-SD-NEXT: cinc w8, w8, ne +; CHECK-SD-NEXT: cmp w10, w11 +; CHECK-SD-NEXT: b.eq .LBB0_1 +; CHECK-SD-NEXT: // %bb.4: // %do.body +; CHECK-SD-NEXT: // in Loop: Header=BB0_3 Depth=1 +; CHECK-SD-NEXT: cbnz w2, .LBB0_1 +; CHECK-SD-NEXT: // %bb.5: // %do.body +; CHECK-SD-NEXT: // in Loop: Header=BB0_3 Depth=1 +; CHECK-SD-NEXT: cbz w10, .LBB0_2 +; CHECK-SD-NEXT: b .LBB0_1 +; CHECK-SD-NEXT: .LBB0_6: // %do.end +; CHECK-SD-NEXT: mov w0, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_func_i32_two_uses: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: adrp x8, :got:ptr_wrapper +; CHECK-GI-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper] +; CHECK-GI-NEXT: ldr x9, [x8] +; CHECK-GI-NEXT: mov w8, wzr +; CHECK-GI-NEXT: b .LBB0_3 +; CHECK-GI-NEXT: .LBB0_1: // in Loop: Header=BB0_3 Depth=1 +; CHECK-GI-NEXT: str xzr, [x9, #8] +; CHECK-GI-NEXT: .LBB0_2: // in Loop: Header=BB0_3 Depth=1 +; CHECK-GI-NEXT: lsl w1, w1, #1 +; CHECK-GI-NEXT: cbz w1, .LBB0_6 +; CHECK-GI-NEXT: .LBB0_3: // %do.body +; CHECK-GI-NEXT: // =>This Inner Loop Header: Depth=1 +; CHECK-GI-NEXT: and w10, w1, w0 +; CHECK-GI-NEXT: tst w1, w0 +; CHECK-GI-NEXT: and w11, w2, w0 +; CHECK-GI-NEXT: cinc w8, w8, ne +; CHECK-GI-NEXT: cmp w10, w11 +; CHECK-GI-NEXT: b.eq .LBB0_1 +; CHECK-GI-NEXT: // %bb.4: // %do.body +; CHECK-GI-NEXT: // in Loop: Header=BB0_3 Depth=1 +; CHECK-GI-NEXT: cbnz w2, .LBB0_1 +; CHECK-GI-NEXT: // %bb.5: // %do.body +; CHECK-GI-NEXT: // in Loop: Header=BB0_3 Depth=1 +; CHECK-GI-NEXT: cbz w10, .LBB0_2 +; CHECK-GI-NEXT: b .LBB0_1 +; CHECK-GI-NEXT: .LBB0_6: // %do.end +; CHECK-GI-NEXT: mov w0, w8 +; CHECK-GI-NEXT: ret entry: %0 = load ptr, ptr @ptr_wrapper, align 8 %result = getelementptr inbounds %struct.anon, ptr %0, i64 0, i32 1 @@ -70,28 +102,52 @@ do.end: ; preds = %4 } define i32 @test_func_i64_one_use(i64 %in, i64 %bit, i64 %mask) { -; CHECK-LABEL: test_func_i64_one_use: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: adrp x8, :got:ptr_wrapper -; CHECK-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper] -; CHECK-NEXT: ldr x9, [x8] -; CHECK-NEXT: mov w8, wzr -; CHECK-NEXT: b .LBB1_2 -; CHECK-NEXT: .LBB1_1: // in Loop: Header=BB1_2 Depth=1 -; CHECK-NEXT: lsl x1, x1, #1 -; CHECK-NEXT: cbz x1, .LBB1_4 -; CHECK-NEXT: .LBB1_2: // %do.body -; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: ands x10, x1, x0 -; CHECK-NEXT: orr x10, x2, x10 -; CHECK-NEXT: cinc w8, w8, ne -; CHECK-NEXT: cbz x10, .LBB1_1 -; CHECK-NEXT: // %bb.3: // in Loop: Header=BB1_2 Depth=1 -; CHECK-NEXT: str xzr, [x9, #8] -; CHECK-NEXT: b .LBB1_1 -; CHECK-NEXT: .LBB1_4: // 
%do.end -; CHECK-NEXT: mov w0, w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_func_i64_one_use: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: adrp x8, :got:ptr_wrapper +; CHECK-SD-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper] +; CHECK-SD-NEXT: ldr x9, [x8] +; CHECK-SD-NEXT: mov w8, wzr +; CHECK-SD-NEXT: b .LBB1_2 +; CHECK-SD-NEXT: .LBB1_1: // in Loop: Header=BB1_2 Depth=1 +; CHECK-SD-NEXT: lsl x1, x1, #1 +; CHECK-SD-NEXT: cbz x1, .LBB1_4 +; CHECK-SD-NEXT: .LBB1_2: // %do.body +; CHECK-SD-NEXT: // =>This Inner Loop Header: Depth=1 +; CHECK-SD-NEXT: ands x10, x1, x0 +; CHECK-SD-NEXT: orr x10, x2, x10 +; CHECK-SD-NEXT: cinc w8, w8, ne +; CHECK-SD-NEXT: cbz x10, .LBB1_1 +; CHECK-SD-NEXT: // %bb.3: // in Loop: Header=BB1_2 Depth=1 +; CHECK-SD-NEXT: str xzr, [x9, #8] +; CHECK-SD-NEXT: b .LBB1_1 +; CHECK-SD-NEXT: .LBB1_4: // %do.end +; CHECK-SD-NEXT: mov w0, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_func_i64_one_use: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: adrp x8, :got:ptr_wrapper +; CHECK-GI-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper] +; CHECK-GI-NEXT: ldr x9, [x8] +; CHECK-GI-NEXT: mov w8, wzr +; CHECK-GI-NEXT: b .LBB1_2 +; CHECK-GI-NEXT: .LBB1_1: // in Loop: Header=BB1_2 Depth=1 +; CHECK-GI-NEXT: lsl x1, x1, #1 +; CHECK-GI-NEXT: cbz x1, .LBB1_4 +; CHECK-GI-NEXT: .LBB1_2: // %do.body +; CHECK-GI-NEXT: // =>This Inner Loop Header: Depth=1 +; CHECK-GI-NEXT: and x10, x1, x0 +; CHECK-GI-NEXT: tst x1, x0 +; CHECK-GI-NEXT: orr x10, x2, x10 +; CHECK-GI-NEXT: cinc w8, w8, ne +; CHECK-GI-NEXT: cbz x10, .LBB1_1 +; CHECK-GI-NEXT: // %bb.3: // in Loop: Header=BB1_2 Depth=1 +; CHECK-GI-NEXT: str xzr, [x9, #8] +; CHECK-GI-NEXT: b .LBB1_1 +; CHECK-GI-NEXT: .LBB1_4: // %do.end +; CHECK-GI-NEXT: mov w0, w8 +; CHECK-GI-NEXT: ret entry: %0 = load ptr, ptr @ptr_wrapper, align 8 %result = getelementptr inbounds %struct.anon, ptr %0, i64 0, i32 1 @@ -124,11 +180,18 @@ do.end: ; preds = %4 } define i64 @test_and1(i64 %x, i64 %y) { -; CHECK-LABEL: test_and1: -; CHECK: // %bb.0: -; CHECK-NEXT: ands x8, x0, #0x3 -; CHECK-NEXT: csel x0, x8, x1, eq -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_and1: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ands x8, x0, #0x3 +; CHECK-SD-NEXT: csel x0, x8, x1, eq +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_and1: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: and x8, x0, #0x3 +; CHECK-GI-NEXT: tst x0, #0x3 +; CHECK-GI-NEXT: csel x0, x8, x1, eq +; CHECK-GI-NEXT: ret %a = and i64 %x, 3 %c = icmp eq i64 %a, 0 %s = select i1 %c, i64 %a, i64 %y @@ -148,23 +211,43 @@ define i64 @test_and2(i64 %x, i64 %y) { } define i64 @test_and3(i64 %x, i64 %y) { -; CHECK-LABEL: test_and3: -; CHECK: // %bb.0: -; CHECK-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w30, -32 -; CHECK-NEXT: mov x20, x0 -; CHECK-NEXT: mov x0, xzr -; CHECK-NEXT: mov x19, x1 -; CHECK-NEXT: bl callee -; CHECK-NEXT: ands x8, x20, #0x3 -; CHECK-NEXT: csel x0, x8, x19, eq -; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_and3: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: str x30, [sp, #-32]! 
// 8-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 32 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w30, -32 +; CHECK-SD-NEXT: mov x20, x0 +; CHECK-SD-NEXT: mov x0, xzr +; CHECK-SD-NEXT: mov x19, x1 +; CHECK-SD-NEXT: bl callee +; CHECK-SD-NEXT: ands x8, x20, #0x3 +; CHECK-SD-NEXT: csel x0, x8, x19, eq +; CHECK-SD-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_and3: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 32 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w30, -32 +; CHECK-GI-NEXT: mov x19, x0 +; CHECK-GI-NEXT: and x21, x0, #0x3 +; CHECK-GI-NEXT: mov x0, xzr +; CHECK-GI-NEXT: mov x20, x1 +; CHECK-GI-NEXT: bl callee +; CHECK-GI-NEXT: tst x19, #0x3 +; CHECK-GI-NEXT: csel x0, x21, x20, eq +; CHECK-GI-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload +; CHECK-GI-NEXT: ret %a = and i64 %x, 3 %b = call i64 @callee(i64 0) %c = icmp eq i64 %a, 0 @@ -173,19 +256,37 @@ define i64 @test_and3(i64 %x, i64 %y) { } define i64 @test_and_4(i64 %x, i64 %y) { -; CHECK-LABEL: test_and_4: -; CHECK: // %bb.0: -; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w30, -16 -; CHECK-NEXT: mov x19, x0 -; CHECK-NEXT: ands x0, x0, #0x3 -; CHECK-NEXT: bl callee -; CHECK-NEXT: ands x8, x19, #0x3 -; CHECK-NEXT: csel x0, x8, x0, eq -; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_and_4: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 16 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w30, -16 +; CHECK-SD-NEXT: mov x19, x0 +; CHECK-SD-NEXT: ands x0, x0, #0x3 +; CHECK-SD-NEXT: bl callee +; CHECK-SD-NEXT: ands x8, x19, #0x3 +; CHECK-SD-NEXT: csel x0, x8, x0, eq +; CHECK-SD-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_and_4: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: str x30, [sp, #-32]! 
// 8-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 32 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w30, -32 +; CHECK-GI-NEXT: and x20, x0, #0x3 +; CHECK-GI-NEXT: mov x19, x0 +; CHECK-GI-NEXT: mov x0, x20 +; CHECK-GI-NEXT: bl callee +; CHECK-GI-NEXT: tst x19, #0x3 +; CHECK-GI-NEXT: csel x0, x20, x0, eq +; CHECK-GI-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload +; CHECK-GI-NEXT: ret %a = and i64 %x, 3 %b = call i64 @callee(i64 %a) %c = icmp eq i64 %a, 0 diff --git a/llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll b/llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll index 2a77d4d..4206c0bc 100644 --- a/llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll +++ b/llvm/test/CodeGen/AArch64/preserve_nonecc_varargs_darwin.ll @@ -27,11 +27,12 @@ define i32 @caller() nounwind ssp { ; CHECK-NEXT: sub sp, sp, #208 ; CHECK-NEXT: mov w8, #10 ; =0xa ; CHECK-NEXT: mov w9, #9 ; =0x9 -; CHECK-NEXT: mov w10, #8 ; =0x8 +; CHECK-NEXT: mov w0, #1 ; =0x1 ; CHECK-NEXT: stp x9, x8, [sp, #24] -; CHECK-NEXT: mov w8, #7 ; =0x7 +; CHECK-NEXT: mov w8, #8 ; =0x8 ; CHECK-NEXT: mov w9, #6 ; =0x6 -; CHECK-NEXT: mov w0, #1 ; =0x1 +; CHECK-NEXT: str x8, [sp, #16] +; CHECK-NEXT: mov w8, #7 ; =0x7 ; CHECK-NEXT: mov w1, #2 ; =0x2 ; CHECK-NEXT: mov w2, #3 ; =0x3 ; CHECK-NEXT: mov w3, #4 ; =0x4 @@ -46,8 +47,7 @@ define i32 @caller() nounwind ssp { ; CHECK-NEXT: stp x22, x21, [sp, #160] ; 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #176] ; 16-byte Folded Spill ; CHECK-NEXT: stp x29, x30, [sp, #192] ; 16-byte Folded Spill -; CHECK-NEXT: stp x8, x10, [sp, #8] -; CHECK-NEXT: str x9, [sp] +; CHECK-NEXT: stp x9, x8, [sp] ; CHECK-NEXT: bl _callee ; CHECK-NEXT: ldp x29, x30, [sp, #192] ; 16-byte Folded Reload ; CHECK-NEXT: ldp x20, x19, [sp, #176] ; 16-byte Folded Reload diff --git a/llvm/test/CodeGen/AArch64/reassocmls.ll b/llvm/test/CodeGen/AArch64/reassocmls.ll index acbf9fc..0909fbf 100644 --- a/llvm/test/CodeGen/AArch64/reassocmls.ll +++ b/llvm/test/CodeGen/AArch64/reassocmls.ll @@ -1,12 +1,25 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 | FileCheck %s +; RUN: llc -mtriple=aarch64-none-elf -mattr=+sve2 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple=aarch64-none-elf -mattr=+sve2 -global-isel -global-isel-abort=2 2>&1 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI + +; CHECK-GI: warning: Instruction selection used fallback path for smlsl_nxv8i16 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for umlsl_nxv8i16 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for mls_nxv8i16 +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for mla_nxv8i16 define i64 @smlsl_i64(i64 %a, i32 %b, i32 %c, i32 %d, i32 %e) { -; CHECK-LABEL: smlsl_i64: -; CHECK: // %bb.0: -; CHECK-NEXT: smsubl x8, w4, w3, x0 -; CHECK-NEXT: smsubl x0, w2, w1, x8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: smlsl_i64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: smsubl x8, w4, w3, x0 +; CHECK-SD-NEXT: smsubl x0, w2, w1, x8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: smlsl_i64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: smull x8, w2, w1 +; CHECK-GI-NEXT: smaddl x8, w4, w3, x8 +; CHECK-GI-NEXT: sub x0, x0, x8 +; CHECK-GI-NEXT: ret %be = sext i32 %b to i64 %ce = sext i32 %c to i64 %de = 
sext i32 %d to i64 @@ -19,11 +32,18 @@ define i64 @smlsl_i64(i64 %a, i32 %b, i32 %c, i32 %d, i32 %e) { } define i64 @umlsl_i64(i64 %a, i32 %b, i32 %c, i32 %d, i32 %e) { -; CHECK-LABEL: umlsl_i64: -; CHECK: // %bb.0: -; CHECK-NEXT: umsubl x8, w4, w3, x0 -; CHECK-NEXT: umsubl x0, w2, w1, x8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: umlsl_i64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: umsubl x8, w4, w3, x0 +; CHECK-SD-NEXT: umsubl x0, w2, w1, x8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: umlsl_i64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: umull x8, w2, w1 +; CHECK-GI-NEXT: umaddl x8, w4, w3, x8 +; CHECK-GI-NEXT: sub x0, x0, x8 +; CHECK-GI-NEXT: ret %be = zext i32 %b to i64 %ce = zext i32 %c to i64 %de = zext i32 %d to i64 @@ -36,11 +56,18 @@ define i64 @umlsl_i64(i64 %a, i32 %b, i32 %c, i32 %d, i32 %e) { } define i64 @mls_i64(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e) { -; CHECK-LABEL: mls_i64: -; CHECK: // %bb.0: -; CHECK-NEXT: msub x8, x4, x3, x0 -; CHECK-NEXT: msub x0, x2, x1, x8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: mls_i64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: msub x8, x4, x3, x0 +; CHECK-SD-NEXT: msub x0, x2, x1, x8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mls_i64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mul x8, x2, x1 +; CHECK-GI-NEXT: madd x8, x4, x3, x8 +; CHECK-GI-NEXT: sub x0, x0, x8 +; CHECK-GI-NEXT: ret %m1.neg = mul i64 %c, %b %m2.neg = mul i64 %e, %d %reass.add = add i64 %m2.neg, %m1.neg @@ -49,11 +76,18 @@ define i64 @mls_i64(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e) { } define i16 @mls_i16(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e) { -; CHECK-LABEL: mls_i16: -; CHECK: // %bb.0: -; CHECK-NEXT: msub w8, w4, w3, w0 -; CHECK-NEXT: msub w0, w2, w1, w8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: mls_i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: msub w8, w4, w3, w0 +; CHECK-SD-NEXT: msub w0, w2, w1, w8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mls_i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mul w8, w2, w1 +; CHECK-GI-NEXT: madd w8, w4, w3, w8 +; CHECK-GI-NEXT: sub w0, w0, w8 +; CHECK-GI-NEXT: ret %m1.neg = mul i16 %c, %b %m2.neg = mul i16 %e, %d %reass.add = add i16 %m2.neg, %m1.neg @@ -91,12 +125,20 @@ define i64 @mls_i64_C(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e) { } define i64 @umlsl_i64_muls(i64 %a, i32 %b, i32 %c, i32 %d, i32 %e) { -; CHECK-LABEL: umlsl_i64_muls: -; CHECK: // %bb.0: -; CHECK-NEXT: umull x8, w2, w3 -; CHECK-NEXT: umsubl x8, w4, w3, x8 -; CHECK-NEXT: umsubl x0, w2, w1, x8 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: umlsl_i64_muls: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: umull x8, w2, w3 +; CHECK-SD-NEXT: umsubl x8, w4, w3, x8 +; CHECK-SD-NEXT: umsubl x0, w2, w1, x8 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: umlsl_i64_muls: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: umull x8, w2, w1 +; CHECK-GI-NEXT: umull x9, w2, w3 +; CHECK-GI-NEXT: umaddl x8, w4, w3, x8 +; CHECK-GI-NEXT: sub x0, x9, x8 +; CHECK-GI-NEXT: ret %be = zext i32 %b to i64 %ce = zext i32 %c to i64 %de = zext i32 %d to i64 @@ -110,13 +152,21 @@ define i64 @umlsl_i64_muls(i64 %a, i32 %b, i32 %c, i32 %d, i32 %e) { } define i64 @umlsl_i64_uses(i64 %a, i32 %b, i32 %c, i32 %d, i32 %e) { -; CHECK-LABEL: umlsl_i64_uses: -; CHECK: // %bb.0: -; CHECK-NEXT: umull x8, w4, w3 -; CHECK-NEXT: umaddl x8, w2, w1, x8 -; CHECK-NEXT: sub x9, x0, x8 -; CHECK-NEXT: and x0, x8, x9 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: umlsl_i64_uses: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: umull x8, w4, w3 +; CHECK-SD-NEXT: umaddl x8, w2, w1, x8 +; CHECK-SD-NEXT: sub x9, x0, x8 +; CHECK-SD-NEXT: and x0, x8, x9 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: 
umlsl_i64_uses: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: umull x8, w2, w1 +; CHECK-GI-NEXT: umaddl x8, w4, w3, x8 +; CHECK-GI-NEXT: sub x9, x0, x8 +; CHECK-GI-NEXT: and x0, x8, x9 +; CHECK-GI-NEXT: ret %be = zext i32 %b to i64 %ce = zext i32 %c to i64 %de = zext i32 %d to i64 @@ -175,11 +225,18 @@ define i64 @mla_i64_mul(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e) { define <8 x i16> @smlsl_v8i16(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d, <8 x i8> %e) { -; CHECK-LABEL: smlsl_v8i16: -; CHECK: // %bb.0: -; CHECK-NEXT: smlsl v0.8h, v4.8b, v3.8b -; CHECK-NEXT: smlsl v0.8h, v2.8b, v1.8b -; CHECK-NEXT: ret +; CHECK-SD-LABEL: smlsl_v8i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: smlsl v0.8h, v4.8b, v3.8b +; CHECK-SD-NEXT: smlsl v0.8h, v2.8b, v1.8b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: smlsl_v8i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: smull v1.8h, v2.8b, v1.8b +; CHECK-GI-NEXT: smlal v1.8h, v4.8b, v3.8b +; CHECK-GI-NEXT: sub v0.8h, v0.8h, v1.8h +; CHECK-GI-NEXT: ret %be = sext <8 x i8> %b to <8 x i16> %ce = sext <8 x i8> %c to <8 x i16> %de = sext <8 x i8> %d to <8 x i16> @@ -192,11 +249,18 @@ define <8 x i16> @smlsl_v8i16(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> % } define <8 x i16> @umlsl_v8i16(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d, <8 x i8> %e) { -; CHECK-LABEL: umlsl_v8i16: -; CHECK: // %bb.0: -; CHECK-NEXT: umlsl v0.8h, v4.8b, v3.8b -; CHECK-NEXT: umlsl v0.8h, v2.8b, v1.8b -; CHECK-NEXT: ret +; CHECK-SD-LABEL: umlsl_v8i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: umlsl v0.8h, v4.8b, v3.8b +; CHECK-SD-NEXT: umlsl v0.8h, v2.8b, v1.8b +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: umlsl_v8i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: umull v1.8h, v2.8b, v1.8b +; CHECK-GI-NEXT: umlal v1.8h, v4.8b, v3.8b +; CHECK-GI-NEXT: sub v0.8h, v0.8h, v1.8h +; CHECK-GI-NEXT: ret %be = zext <8 x i8> %b to <8 x i16> %ce = zext <8 x i8> %c to <8 x i16> %de = zext <8 x i8> %d to <8 x i16> @@ -209,11 +273,18 @@ define <8 x i16> @umlsl_v8i16(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> % } define <8 x i16> @mls_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d, <8 x i16> %e) { -; CHECK-LABEL: mls_v8i16: -; CHECK: // %bb.0: -; CHECK-NEXT: mls v0.8h, v4.8h, v3.8h -; CHECK-NEXT: mls v0.8h, v2.8h, v1.8h -; CHECK-NEXT: ret +; CHECK-SD-LABEL: mls_v8i16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mls v0.8h, v4.8h, v3.8h +; CHECK-SD-NEXT: mls v0.8h, v2.8h, v1.8h +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mls_v8i16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mul v1.8h, v2.8h, v1.8h +; CHECK-GI-NEXT: mla v1.8h, v4.8h, v3.8h +; CHECK-GI-NEXT: sub v0.8h, v0.8h, v1.8h +; CHECK-GI-NEXT: ret %m1.neg = mul <8 x i16> %c, %b %m2.neg = mul <8 x i16> %e, %d %reass.add = add <8 x i16> %m2.neg, %m1.neg @@ -236,12 +307,20 @@ define <8 x i16> @mla_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> } define <8 x i16> @mls_v8i16_C(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d, <8 x i16> %e) { -; CHECK-LABEL: mls_v8i16_C: -; CHECK: // %bb.0: -; CHECK-NEXT: movi v0.8h, #10 -; CHECK-NEXT: mls v0.8h, v4.8h, v3.8h -; CHECK-NEXT: mls v0.8h, v2.8h, v1.8h -; CHECK-NEXT: ret +; CHECK-SD-LABEL: mls_v8i16_C: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: movi v0.8h, #10 +; CHECK-SD-NEXT: mls v0.8h, v4.8h, v3.8h +; CHECK-SD-NEXT: mls v0.8h, v2.8h, v1.8h +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mls_v8i16_C: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mul v0.8h, v2.8h, v1.8h +; CHECK-GI-NEXT: movi v1.8h, #10 +; CHECK-GI-NEXT: mla v0.8h, v4.8h, v3.8h +; CHECK-GI-NEXT: sub v0.8h, v1.8h, v0.8h +; 
CHECK-GI-NEXT: ret %m1.neg = mul <8 x i16> %c, %b %m2.neg = mul <8 x i16> %e, %d %reass.add = add <8 x i16> %m2.neg, %m1.neg @@ -250,13 +329,21 @@ define <8 x i16> @mls_v8i16_C(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16 } define <8 x i16> @mla_v8i16_C(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d, <8 x i16> %e) { -; CHECK-LABEL: mla_v8i16_C: -; CHECK: // %bb.0: -; CHECK-NEXT: mul v1.8h, v2.8h, v1.8h -; CHECK-NEXT: movi v0.8h, #10 -; CHECK-NEXT: mla v1.8h, v4.8h, v3.8h -; CHECK-NEXT: add v0.8h, v1.8h, v0.8h -; CHECK-NEXT: ret +; CHECK-SD-LABEL: mla_v8i16_C: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mul v1.8h, v2.8h, v1.8h +; CHECK-SD-NEXT: movi v0.8h, #10 +; CHECK-SD-NEXT: mla v1.8h, v4.8h, v3.8h +; CHECK-SD-NEXT: add v0.8h, v1.8h, v0.8h +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mla_v8i16_C: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mul v0.8h, v2.8h, v1.8h +; CHECK-GI-NEXT: movi v1.8h, #10 +; CHECK-GI-NEXT: mla v0.8h, v4.8h, v3.8h +; CHECK-GI-NEXT: add v0.8h, v1.8h, v0.8h +; CHECK-GI-NEXT: ret %m1.neg = mul <8 x i16> %c, %b %m2.neg = mul <8 x i16> %e, %d %reass.add = add <8 x i16> %m2.neg, %m1.neg diff --git a/llvm/test/CodeGen/AArch64/register-coalesce-implicit-def-subreg-to-reg.mir b/llvm/test/CodeGen/AArch64/register-coalesce-implicit-def-subreg-to-reg.mir new file mode 100644 index 0000000..aecb90a --- /dev/null +++ b/llvm/test/CodeGen/AArch64/register-coalesce-implicit-def-subreg-to-reg.mir @@ -0,0 +1,23 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 +# RUN: llc -mtriple=aarch64 -start-before=register-coalescer -stop-after=virtregrewriter -enable-subreg-liveness=false -o - %s | FileCheck %s +# RUN: llc -mtriple=aarch64 -start-before=register-coalescer -stop-after=virtregrewriter -enable-subreg-liveness=true -o - %s | FileCheck %s +--- +name: test +tracksRegLiveness: true +body: | + bb.0: + liveins: $x1 + ; CHECK-LABEL: name: test + ; CHECK: liveins: $x1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: renamable $x0 = COPY $x1 + ; CHECK-NEXT: renamable $w1 = ORRWrr $wzr, renamable $w0, implicit-def renamable $x1 + ; CHECK-NEXT: RET_ReallyLR implicit $x1, implicit $x0 + %190:gpr64 = COPY killed $x1 + %191:gpr32 = COPY %190.sub_32:gpr64 + %192:gpr32 = ORRWrr $wzr, killed %191:gpr32 + %193:gpr64all = SUBREG_TO_REG 0, killed %192:gpr32, %subreg.sub_32 + $x0 = COPY killed %190:gpr64 + $x1 = COPY killed %193:gpr64all + RET_ReallyLR implicit $x1, implicit $x0 +... 
diff --git a/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir b/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir index 08fc47d..eb6242c 100644 --- a/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir +++ b/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir @@ -7,9 +7,18 @@ # CHECK-DBG: ********** JOINING INTERVALS *********** # CHECK-DBG: ********** INTERVALS ********** # CHECK-DBG: %0 [16r,32r:0) 0@16r weight:0.000000e+00 -# CHECK-DBG: %3 [48r,112r:0) 0@48r L0000000000000040 [48r,112r:0) 0@48r weight:0.000000e+00 -# CHECK-DBG: %4 [80r,112e:1)[112e,112d:0) 0@112e 1@80r L0000000000000080 [112e,112d:0) 0@112e L0000000000000040 [80r,112e:1)[112e,112d:0) 0@112e 1@80r weight:0.000000e+00 +# CHECK-DBG: %3 [48r,112r:0) 0@48r L0000000000000080 [48r,112r:0) 0@48r L0000000000000040 [48r,112r:0) 0@48r weight:0.000000e+00 +# CHECK-DBG: %4 [80r,112e:1)[112e,112d:0) 0@112e 1@80r L0000000000000080 [80r,112e:1)[112e,112d:0) 0@112e 1@80r L0000000000000040 [80r,112e:1)[112e,112d:0) 0@112e 1@80r weight:0.000000e+00 # CHECK-DBG: %5 [32r,112r:1)[112r,112d:0) 0@112r 1@32r weight:0.000000e+00 +# CHECK-DBG: ********** MACHINEINSTRS ********** +# CHECK-DBG: 0B bb.0.entry: +# CHECK-DBG: 16B %0:gpr64sp = ADDXri %stack.0, 0, 0 +# CHECK-DBG: 32B %5:gpr64common = nuw ADDXri %0:gpr64sp, 64, 0 +# CHECK-DBG: 48B undef %3.sub_32:gpr64 = MOVi32imm 64, implicit-def %3:gpr64 +# CHECK-DBG: 80B undef %4.sub_32:gpr64 = MOVi32imm 64, implicit-def %4:gpr64 +# CHECK-DBG: 112B dead %5:gpr64common, dead early-clobber %4:gpr64 = MOPSMemorySetPseudo %5:gpr64common(tied-def 0), %4:gpr64(tied-def 1), %3:gpr64, implicit-def dead $nzcv +# CHECK-DBG: 128B RET_ReallyLR + --- name: test tracksRegLiveness: true @@ -43,9 +52,44 @@ body: | # CHECK-DBG: %1 [32r,48B:2)[48B,320r:0)[320r,368B:1) 0@48B-phi 1@320r 2@32r # CHECK-DBG-SAME: weight:0.000000e+00 # CHECK-DBG: %3 [80r,160B:2)[240r,272B:1)[288r,304B:0)[304B,320r:3) 0@288r 1@240r 2@80r 3@304B-phi -# CHECK-DBG-SAME: L0000000000000080 [288r,304B:0)[304B,320r:3) 0@288r 1@x 2@x 3@304B-phi +# CHECK-DBG-SAME: L0000000000000080 [240r,272B:1)[288r,304B:0)[304B,320r:3) 0@288r 1@240r 2@x 3@304B-phi # CHECK-DBG-SAME: L0000000000000040 [80r,160B:2)[240r,272B:1)[288r,304B:0)[304B,320r:3) 0@288r 1@240r 2@80r 3@304B-phi # CHECK-DBG-SAME: weight:0.000000e+00 +# CHECK-DBG: ********** MACHINEINSTRS ********** +# CHECK-DBG: 0B bb.0: +# CHECK-DBG: successors: %bb.1(0x80000000); %bb.1(100.00%) +# CHECK-DBG: 32B %1:gpr64 = IMPLICIT_DEF +# CHECK-DBG: 48B bb.1: +# CHECK-DBG: ; predecessors: %bb.0, %bb.7 +# CHECK-DBG: successors: %bb.2(0x80000000); %bb.2(100.00%) +# CHECK-DBG: 64B bb.2: +# CHECK-DBG: ; predecessors: %bb.1 +# CHECK-DBG: successors: %bb.3(0x80000000); %bb.3(100.00%) +# CHECK-DBG: 80B undef %3.sub_32:gpr64 = MOVi32imm 1 +# CHECK-DBG: 96B bb.3: +# CHECK-DBG: ; predecessors: %bb.2 +# CHECK-DBG: successors: %bb.7(0x40000000), %bb.4(0x40000000); %bb.7(50.00%), %bb.4(50.00%) +# CHECK-DBG: 112B $nzcv = IMPLICIT_DEF +# CHECK-DBG: 144B Bcc 1, %bb.7, implicit killed $nzcv +# CHECK-DBG: 160B bb.4: +# CHECK-DBG: ; predecessors: %bb.3 +# CHECK-DBG: successors: %bb.6(0x40000000), %bb.5(0x40000000); %bb.6(50.00%), %bb.5(50.00%) +# CHECK-DBG: 176B $nzcv = IMPLICIT_DEF +# CHECK-DBG: 192B Bcc 1, %bb.6, implicit killed $nzcv +# CHECK-DBG: 208B bb.5: +# CHECK-DBG: ; predecessors: %bb.4 +# CHECK-DBG: successors: %bb.7(0x80000000); %bb.7(100.00%) +# CHECK-DBG: 240B undef %3.sub_32:gpr64 = MOVi32imm 1, implicit-def %3:gpr64 +# CHECK-DBG: 256B 
B %bb.7 +# CHECK-DBG: 272B bb.6: +# CHECK-DBG: ; predecessors: %bb.4 +# CHECK-DBG: successors: %bb.7(0x80000000); %bb.7(100.00%) +# CHECK-DBG: 288B %3:gpr64 = COPY $xzr +# CHECK-DBG: 304B bb.7: +# CHECK-DBG: ; predecessors: %bb.3, %bb.5, %bb.6 +# CHECK-DBG: successors: %bb.1(0x80000000); %bb.1(100.00%) +# CHECK-DBG: 320B %1:gpr64 = ADDXrs %1:gpr64, %3:gpr64, 1 +# CHECK-DBG: 352B B %bb.1 --- name: reproducer tracksRegLiveness: true @@ -92,6 +136,42 @@ body: | # CHECK-DBG-SAME: L0000000000000080 [224r,256B:1)[272r,288B:0)[288B,304r:3) 0@272r 1@224r 2@x 3@288B-phi # CHECK-DBG-SAME: L0000000000000040 [80r,160B:2)[224r,256B:1)[272r,288B:0)[288B,304r:3) 0@272r 1@224r 2@80r 3@288B-phi # CHECK-DBG-SAME: weight:0.000000e+00 +# CHECK-DBG: ********** MACHINEINSTRS ********** +# CHECK-DBG: 0B bb.0: +# CHECK-DBG: successors: %bb.1(0x80000000); %bb.1(100.00%) +# CHECK-DBG: 32B %1:gpr64 = IMPLICIT_DEF +# CHECK-DBG: 48B bb.1: +# CHECK-DBG: ; predecessors: %bb.0, %bb.7 +# CHECK-DBG: successors: %bb.2(0x80000000); %bb.2(100.00%) +# CHECK-DBG: 64B bb.2: +# CHECK-DBG: ; predecessors: %bb.1 +# CHECK-DBG: successors: %bb.3(0x80000000); %bb.3(100.00%) +# CHECK-DBG: 80B undef %3.sub_32:gpr64 = MOVi32imm 1 +# CHECK-DBG: 96B bb.3: +# CHECK-DBG: ; predecessors: %bb.2 +# CHECK-DBG: successors: %bb.7(0x40000000), %bb.4(0x40000000); %bb.7(50.00%), %bb.4(50.00%) +# CHECK-DBG: 112B $nzcv = IMPLICIT_DEF +# CHECK-DBG: 144B Bcc 1, %bb.7, implicit killed $nzcv +# CHECK-DBG: 160B bb.4: +# CHECK-DBG: ; predecessors: %bb.3 +# CHECK-DBG: successors: %bb.6(0x40000000), %bb.5(0x40000000); %bb.6(50.00%), %bb.5(50.00%) +# CHECK-DBG: 176B $nzcv = IMPLICIT_DEF +# CHECK-DBG: 192B Bcc 1, %bb.6, implicit killed $nzcv +# CHECK-DBG: 208B bb.5: +# CHECK-DBG: ; predecessors: %bb.4 +# CHECK-DBG: successors: %bb.7(0x80000000); %bb.7(100.00%) +# CHECK-DBG: 224B %3:gpr64 = IMPLICIT_DEF +# CHECK-DBG: 240B B %bb.7 +# CHECK-DBG: 256B bb.6: +# CHECK-DBG: ; predecessors: %bb.4 +# CHECK-DBG: successors: %bb.7(0x80000000); %bb.7(100.00%) +# CHECK-DBG: 272B %3:gpr64 = COPY $xzr +# CHECK-DBG: 288B bb.7: +# CHECK-DBG: ; predecessors: %bb.3, %bb.5, %bb.6 +# CHECK-DBG: successors: %bb.1(0x80000000); %bb.1(100.00%) +# CHECK-DBG: 304B %1:gpr64 = ADDXrs %1:gpr64, %3:gpr64, 1 +# CHECK-DBG: 336B B %bb.1 + --- name: reproducer2 tracksRegLiveness: true @@ -127,3 +207,78 @@ body: | B %bb.1 ... 
+# CHECK-DBG: ********** REGISTER COALESCER ********** +# CHECK-DBG: ********** Function: reproducer3 +# CHECK-DBG: ********** JOINING INTERVALS *********** +# CHECK-DBG: ********** INTERVALS ********** +# CHECK-DBG: W0 [0B,32r:0)[320r,336r:1) 0@0B-phi 1@320r +# CHECK-DBG: W1 [0B,16r:0) 0@0B-phi +# CHECK-DBG: %0 [16r,64r:0) 0@16r weight:0.000000e+00 +# CHECK-DBG: %1 [32r,128r:0) 0@32r weight:0.000000e+00 +# CHECK-DBG: %2 [48r,64r:0) 0@48r weight:0.000000e+00 +# CHECK-DBG: %3 [64r,80r:0) 0@64r weight:0.000000e+00 +# CHECK-DBG: %4 [80r,176r:0) 0@80r weight:0.000000e+00 +# CHECK-DBG: %7 [112r,128r:1)[128r,256r:0)[304B,320r:0) 0@128r 1@112r +# CHECK-DBG-SAME: L0000000000000080 [128r,256r:0)[304B,320r:0) 0@128r +# CHECK-DBG-SAME: L0000000000000040 [112r,128r:1)[128r,256r:0)[304B,320r:0) 0@128r 1@112r +# CHECK-DBG-SAME: weight:0.000000e+00 +# CHECK-DBG: %8 [96r,176r:1)[176r,192r:0) 0@176r 1@96r weight:0.000000e+00 +# CHECK-DBG: %9 [256r,272r:0) 0@256r weight:0.000000e+00 +# CHECK-DBG: ********** MACHINEINSTRS ********** +# CHECK-DBG: 0B bb.0: +# CHECK-DBG: successors: %bb.2(0x40000000), %bb.1(0x40000000); %bb.2(50.00%), %bb.1(50.00%) +# CHECK-DBG: liveins: $w0, $w1 +# CHECK-DBG: 16B %0:gpr32 = COPY $w1 +# CHECK-DBG: 32B %1:gpr32 = COPY $w0 +# CHECK-DBG: 48B %2:gpr32 = UBFMWri %1:gpr32, 31, 30 +# CHECK-DBG: 64B %3:gpr32 = SUBWrs %2:gpr32, %0:gpr32, 1 +# CHECK-DBG: 80B %4:gpr32 = UBFMWri %3:gpr32, 1, 31 +# CHECK-DBG: 96B %8:gpr32common = MOVi32imm 1 +# CHECK-DBG: 112B undef %7.sub_32:gpr64 = MOVi32imm 1 +# CHECK-DBG: 128B undef %7.sub_32:gpr64 = BFMWri %7.sub_32:gpr64(tied-def 0), %1:gpr32, 31, 30, implicit-def %7:gpr64 +# CHECK-DBG: 176B %8:gpr32common = BFMWri %8:gpr32common(tied-def 0), %4:gpr32, 30, 29 +# CHECK-DBG: 192B dead $wzr = SUBSWri %8:gpr32common, 0, 0, implicit-def $nzcv +# CHECK-DBG: 208B Bcc 2, %bb.2, implicit killed $nzcv +# CHECK-DBG: 224B B %bb.1 +# CHECK-DBG: 240B bb.1: +# CHECK-DBG: ; predecessors: %bb.0 +# CHECK-DBG: 256B %9:gpr64common = UBFMXri %7:gpr64, 62, 61 +# CHECK-DBG: 272B dead $xzr = LDRXui %9:gpr64common, 0 +# CHECK-DBG: 288B RET_ReallyLR +# CHECK-DBG: 304B bb.2: +# CHECK-DBG: ; predecessors: %bb.0 +# CHECK-DBG: 320B $x0 = COPY %7:gpr64 +# CHECK-DBG: 336B RET_ReallyLR implicit $x0 + +--- +name: reproducer3 +tracksRegLiveness: true +body: | + bb.0: + liveins: $w0, $w1 + + %0:gpr32 = COPY killed $w1 + %1:gpr32 = COPY killed $w0 + %3:gpr32 = UBFMWri %1, 31, 30 + %4:gpr32 = SUBWrs killed %3, killed %0, 1 + %5:gpr32 = UBFMWri killed %4, 1, 31 + %6:gpr32 = MOVi32imm 1 + %7:gpr32 = COPY %6 + %7:gpr32 = BFMWri %7, killed %1, 31, 30 + %8:gpr64 = SUBREG_TO_REG 0, killed %7, %subreg.sub_32 + %9:gpr32common = COPY killed %6 + %9:gpr32common = BFMWri %9, killed %5, 30, 29 + dead $wzr = SUBSWri killed %9, 0, 0, implicit-def $nzcv + Bcc 2, %bb.2, implicit killed $nzcv + B %bb.1 + + bb.1: + %10:gpr64common = UBFMXri killed %8, 62, 61 + dead $xzr = LDRXui killed %10, 0 + RET_ReallyLR + + bb.2: + $x0 = COPY killed %8 + RET_ReallyLR implicit killed $x0 + +... 
diff --git a/llvm/test/CodeGen/AArch64/selectopt-const.ll b/llvm/test/CodeGen/AArch64/selectopt-const.ll index a44c746..fe48dba 100644 --- a/llvm/test/CodeGen/AArch64/selectopt-const.ll +++ b/llvm/test/CodeGen/AArch64/selectopt-const.ll @@ -29,8 +29,8 @@ define i32 @test_const(ptr %in1, ptr %in2, ptr %out, i32 %n, ptr %tbl) { ; CHECK-NEXT: csel x10, x9, xzr, lt ; CHECK-NEXT: subs x8, x8, #1 ; CHECK-NEXT: ldr s3, [x4, x10] -; CHECK-NEXT: fcvtzs w10, s3 -; CHECK-NEXT: str w10, [x2], #4 +; CHECK-NEXT: fcvtzs s3, s3 +; CHECK-NEXT: st1 { v3.s }[0], [x2], #4 ; CHECK-NEXT: b.ne .LBB0_2 ; CHECK-NEXT: .LBB0_3: // %for.cond.cleanup ; CHECK-NEXT: mov w0, wzr diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll index c63899c..19ac03d 100644 --- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming -verify-machineinstrs < %s | FileCheck %s --check-prefixes=STRIDED ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CONTIGUOUS +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1,+sme2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CONTIGUOUS define <vscale x 32 x i8> @ld1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %z1, target("aarch64.svcount") %pn, ptr %ptr) nounwind { ; CHECK-LABEL: ld1_x2_i8_z0_z8: diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll index 05241f7..039b621 100644 --- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming -verify-machineinstrs < %s | FileCheck %s --check-prefixes=STRIDED ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CONTIGUOUS +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1,+sme2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CONTIGUOUS define <vscale x 32 x i8> @ldnt1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %z1, target("aarch64.svcount") %pn, ptr %ptr) nounwind { ; STRIDED-LABEL: ldnt1_x2_i8_z0_z8: diff --git a/llvm/test/CodeGen/AArch64/store-float-conversion.ll b/llvm/test/CodeGen/AArch64/store-float-conversion.ll new file mode 100644 index 0000000..c46801f --- /dev/null +++ b/llvm/test/CodeGen/AArch64/store-float-conversion.ll @@ -0,0 +1,131 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -verify-machineinstrs -mtriple=aarch64 < %s | FileCheck %s + +define void @f32_to_u8(float %f, ptr %dst) { +; CHECK-LABEL: f32_to_u8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fcvtzu s0, s0 +; CHECK-NEXT: str b0, [x0] +; CHECK-NEXT: ret +entry: + %conv = fptoui float %f to i32 + %trunc = trunc i32 %conv to i8 + store i8 %trunc, ptr %dst + ret void +} + +define void @f32_to_s8(float %f, ptr %dst) { +; CHECK-LABEL: f32_to_s8: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fcvtzs s0, s0 +; CHECK-NEXT: str b0, [x0] +; CHECK-NEXT: ret +entry: + %conv = fptosi float %f to i32 + %trunc = trunc i32 %conv to i8 + store i8 %trunc, ptr %dst + ret void +} + +define void @f32_to_u16(float %f, 
ptr %dst) { +; CHECK-LABEL: f32_to_u16: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fcvtzu s0, s0 +; CHECK-NEXT: str h0, [x0] +; CHECK-NEXT: ret +entry: + %conv = fptoui float %f to i32 + %trunc = trunc i32 %conv to i16 + store i16 %trunc, ptr %dst + ret void +} + +define void @f32_to_s16(float %f, ptr %dst) { +; CHECK-LABEL: f32_to_s16: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fcvtzs s0, s0 +; CHECK-NEXT: str h0, [x0] +; CHECK-NEXT: ret +entry: + %conv = fptosi float %f to i32 + %trunc = trunc i32 %conv to i16 + store i16 %trunc, ptr %dst + ret void +} + +define void @f32_to_u32(float %f, ptr %dst) { +; CHECK-LABEL: f32_to_u32: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fcvtzu s0, s0 +; CHECK-NEXT: str s0, [x0] +; CHECK-NEXT: ret +entry: + %conv = fptoui float %f to i32 + store i32 %conv, ptr %dst + ret void +} + +define void @f32_to_s32(float %f, ptr %dst) { +; CHECK-LABEL: f32_to_s32: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fcvtzs s0, s0 +; CHECK-NEXT: str s0, [x0] +; CHECK-NEXT: ret +entry: + %conv = fptosi float %f to i32 + store i32 %conv, ptr %dst + ret void +} + +define void @f32_to_s64(float %f, ptr %dst) { +; CHECK-LABEL: f32_to_s64: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fcvtzs w8, s0 +; CHECK-NEXT: sxtw x8, w8 +; CHECK-NEXT: str x8, [x0] +; CHECK-NEXT: ret +entry: + %conv = fptosi float %f to i32 + %ext = sext i32 %conv to i64 + store i64 %ext, ptr %dst + ret void +} + +define void @f64_to_u64(double %d, ptr %dst) { +; CHECK-LABEL: f64_to_u64: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fcvtzu d0, d0 +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret +entry: + %conv = fptoui double %d to i64 + store i64 %conv, ptr %dst + ret void +} + +define void @f64_to_s64(double %d, ptr %dst) { +; CHECK-LABEL: f64_to_s64: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fcvtzs d0, d0 +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret +entry: + %conv = fptosi double %d to i64 + store i64 %conv, ptr %dst + ret void +} + +define i32 @f32_to_i32_multiple_uses(float %f, ptr %dst) { +; CHECK-LABEL: f32_to_i32_multiple_uses: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fcvtzs w8, s0 +; CHECK-NEXT: mov x9, x0 +; CHECK-NEXT: mov w0, w8 +; CHECK-NEXT: strb w8, [x9] +; CHECK-NEXT: ret +entry: + %conv = fptosi float %f to i32 + %trunc = trunc i32 %conv to i8 + store i8 %trunc, ptr %dst + ret i32 %conv +} diff --git a/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll b/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll index 52cb2d9..c7fb2db 100644 --- a/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll +++ b/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll @@ -267,7 +267,7 @@ define <vscale x 32 x i16> @interleave4_nxv8i16(<vscale x 8 x i16> %vec0, <vscal ; SME2-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3 ; SME2-NEXT: zip { z0.h - z3.h }, { z0.h - z3.h } ; SME2-NEXT: ret - %retval = call <vscale x 32 x i16> @llvm.vector.interleave4.nxv8i16(<vscale x 8 x i16> %vec0, <vscale x 8 x i16> %vec1, <vscale x 8 x i16> %vec2, <vscale x 8 x i16> %vec3) + %retval = call <vscale x 32 x i16> @llvm.vector.interleave4.nxv32i16(<vscale x 8 x i16> %vec0, <vscale x 8 x i16> %vec1, <vscale x 8 x i16> %vec2, <vscale x 8 x i16> %vec3) ret <vscale x 32 x i16> %retval } @@ -540,30 +540,81 @@ define <vscale x 4 x i32> @interleave2_nxv2i32(<vscale x 2 x i32> %vec0, <vscale ret <vscale x 4 x i32> %retval } -; Float declarations -declare <vscale x 4 x half> @llvm.vector.interleave2.nxv4f16(<vscale x 2 x half>, <vscale x 2 x half>) -declare <vscale x 8 x half> 
@llvm.vector.interleave2.nxv8f16(<vscale x 4 x half>, <vscale x 4 x half>) -declare <vscale x 16 x half> @llvm.vector.interleave2.nxv16f16(<vscale x 8 x half>, <vscale x 8 x half>) -declare <vscale x 4 x float> @llvm.vector.interleave2.nxv4f32(<vscale x 2 x float>, <vscale x 2 x float>) -declare <vscale x 8 x float> @llvm.vector.interleave2.nxv8f32(<vscale x 4 x float>, <vscale x 4 x float>) -declare <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x double>, <vscale x 2 x double>) +define <vscale x 4 x i16> @interleave2_same_const_splat_nxv4i16() { +; CHECK-LABEL: interleave2_same_const_splat_nxv4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.s, #3 // =0x3 +; CHECK-NEXT: ret + %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3)) + ret <vscale x 4 x i16> %retval +} + +define <vscale x 4 x i16> @interleave2_diff_const_splat_nxv4i16() { +; SVE-LABEL: interleave2_diff_const_splat_nxv4i16: +; SVE: // %bb.0: +; SVE-NEXT: mov z0.d, #4 // =0x4 +; SVE-NEXT: mov z1.d, #3 // =0x3 +; SVE-NEXT: zip2 z2.d, z1.d, z0.d +; SVE-NEXT: zip1 z0.d, z1.d, z0.d +; SVE-NEXT: uzp1 z0.s, z0.s, z2.s +; SVE-NEXT: ret +; +; SME2-LABEL: interleave2_diff_const_splat_nxv4i16: +; SME2: // %bb.0: +; SME2-NEXT: mov z0.d, #4 // =0x4 +; SME2-NEXT: mov z1.d, #3 // =0x3 +; SME2-NEXT: zip { z0.d, z1.d }, z1.d, z0.d +; SME2-NEXT: uzp1 z0.s, z0.s, z1.s +; SME2-NEXT: ret + %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.v4i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 4)) + ret <vscale x 4 x i16> %retval +} -; Integer declarations -declare <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8>, <vscale x 16 x i8>) -declare <vscale x 16 x i16> @llvm.vector.interleave2.nxv16i16(<vscale x 8 x i16>, <vscale x 8 x i16>) -declare <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32>, <vscale x 4 x i32>) -declare <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64>, <vscale x 2 x i64>) +define <vscale x 4 x i16> @interleave2_same_nonconst_splat_nxv4i16(i16 %a) { +; CHECK-LABEL: interleave2_same_nonconst_splat_nxv4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.s, w0 +; CHECK-NEXT: ret + %ins = insertelement <vscale x 2 x i16> poison, i16 %a, i32 0 + %splat = shufflevector <vscale x 2 x i16> %ins, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer + %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> %splat, <vscale x 2 x i16> %splat) + ret <vscale x 4 x i16> %retval +} -; Predicated -declare <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1>, <vscale x 16 x i1>) -declare <vscale x 16 x i1> @llvm.vector.interleave2.nxv16i1(<vscale x 8 x i1>, <vscale x 8 x i1>) -declare <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1>, <vscale x 4 x i1>) -declare <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1>, <vscale x 2 x i1>) - -; Illegal type size -declare <vscale x 16 x i32> @llvm.vector.interleave2.nxv16i32(<vscale x 8 x i32>, <vscale x 8 x i32>) -declare <vscale x 8 x i64> @llvm.vector.interleave2.nxv8i64(<vscale x 4 x i64>, <vscale x 4 x i64>) - -declare <vscale x 16 x i8> @llvm.vector.interleave2.nxv16i8(<vscale x 8 x i8>, <vscale x 8 x i8>) -declare <vscale x 8 x i16> @llvm.vector.interleave2.nxv8i16(<vscale x 4 x i16>, <vscale x 4 x i16>) -declare <vscale x 4 x i32> @llvm.vector.interleave2.nxv4i32(<vscale x 2 x i32>, <vscale x 2 x i32>) +define <vscale x 4 x i16> 
@interleave2_diff_nonconst_splat_nxv4i16(i16 %a, i16 %b) { +; SVE-LABEL: interleave2_diff_nonconst_splat_nxv4i16: +; SVE: // %bb.0: +; SVE-NEXT: // kill: def $w1 killed $w1 def $x1 +; SVE-NEXT: // kill: def $w0 killed $w0 def $x0 +; SVE-NEXT: mov z0.d, x0 +; SVE-NEXT: mov z1.d, x1 +; SVE-NEXT: zip2 z2.d, z0.d, z1.d +; SVE-NEXT: zip1 z0.d, z0.d, z1.d +; SVE-NEXT: uzp1 z0.s, z0.s, z2.s +; SVE-NEXT: ret +; +; SME2-LABEL: interleave2_diff_nonconst_splat_nxv4i16: +; SME2: // %bb.0: +; SME2-NEXT: // kill: def $w1 killed $w1 def $x1 +; SME2-NEXT: // kill: def $w0 killed $w0 def $x0 +; SME2-NEXT: mov z0.d, x0 +; SME2-NEXT: mov z1.d, x1 +; SME2-NEXT: zip { z0.d, z1.d }, z0.d, z1.d +; SME2-NEXT: uzp1 z0.s, z0.s, z1.s +; SME2-NEXT: ret + %ins1 = insertelement <vscale x 2 x i16> poison, i16 %a, i32 0 + %splat1 = shufflevector <vscale x 2 x i16> %ins1, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer + %ins2 = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0 + %splat2 = shufflevector <vscale x 2 x i16> %ins2, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer + %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> %splat1, <vscale x 2 x i16> %splat2) + ret <vscale x 4 x i16> %retval +} + +define <vscale x 8 x i16> @interleave4_same_const_splat_nxv8i16() { +; CHECK-LABEL: interleave4_same_const_splat_nxv8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.h, #3 // =0x3 +; CHECK-NEXT: ret + %retval = call <vscale x 8 x i16> @llvm.vector.interleave4.nxv8i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3)) + ret <vscale x 8 x i16> %retval +} diff --git a/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll b/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll index 9306c20..7dcd56c 100644 --- a/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll +++ b/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll @@ -1,14 +1,14 @@ -; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s |FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mattr=+sve < %s | FileCheck %s -declare i32 @llvm.vscale.i32() -declare i64 @llvm.vscale.i64() +target triple = "aarch64-unknown-linux-gnu" ; Fold (add (vscale * C0), (vscale * C1)) to (vscale * (C0 + C1)). define i64 @combine_add_vscale_i64() nounwind { ; CHECK-LABEL: combine_add_vscale_i64: -; CHECK-NOT: add -; CHECK-NEXT: cntd x0 -; CHECK-NEXT: ret +; CHECK: // %bb.0: +; CHECK-NEXT: cntd x0 +; CHECK-NEXT: ret %vscale = call i64 @llvm.vscale.i64() %add = add i64 %vscale, %vscale ret i64 %add @@ -16,9 +16,10 @@ define i64 @combine_add_vscale_i64() nounwind { define i32 @combine_add_vscale_i32() nounwind { ; CHECK-LABEL: combine_add_vscale_i32: -; CHECK-NOT: add -; CHECK-NEXT: cntd x0 -; CHECK-NEXT: ret +; CHECK: // %bb.0: +; CHECK-NEXT: cntd x0 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: ret %vscale = call i32 @llvm.vscale.i32() %add = add i32 %vscale, %vscale ret i32 %add @@ -28,9 +29,9 @@ define i32 @combine_add_vscale_i32() nounwind { ; In this test, C0 = 1, C1 = 32. 
define i64 @combine_mul_vscale_i64() nounwind { ; CHECK-LABEL: combine_mul_vscale_i64: -; CHECK-NOT: mul -; CHECK-NEXT: rdvl x0, #2 -; CHECK-NEXT: ret +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x0, #2 +; CHECK-NEXT: ret %vscale = call i64 @llvm.vscale.i64() %mul = mul i64 %vscale, 32 ret i64 %mul @@ -38,9 +39,10 @@ define i64 @combine_mul_vscale_i64() nounwind { define i32 @combine_mul_vscale_i32() nounwind { ; CHECK-LABEL: combine_mul_vscale_i32: -; CHECK-NOT: mul -; CHECK-NEXT: rdvl x0, #3 -; CHECK-NEXT: ret +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x0, #3 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: ret %vscale = call i32 @llvm.vscale.i32() %mul = mul i32 %vscale, 48 ret i32 %mul @@ -49,11 +51,11 @@ define i32 @combine_mul_vscale_i32() nounwind { ; Canonicalize (sub X, (vscale * C)) to (add X, (vscale * -C)) define i64 @combine_sub_vscale_i64(i64 %in) nounwind { ; CHECK-LABEL: combine_sub_vscale_i64: -; CHECK-NOT: sub -; CHECK-NEXT: rdvl x8, #-1 -; CHECK-NEXT: asr x8, x8, #4 -; CHECK-NEXT: add x0, x0, x8 -; CHECK-NEXT: ret +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #-1 +; CHECK-NEXT: asr x8, x8, #4 +; CHECK-NEXT: add x0, x0, x8 +; CHECK-NEXT: ret %vscale = call i64 @llvm.vscale.i64() %sub = sub i64 %in, %vscale ret i64 %sub @@ -61,11 +63,11 @@ define i64 @combine_sub_vscale_i64(i64 %in) nounwind { define i32 @combine_sub_vscale_i32(i32 %in) nounwind { ; CHECK-LABEL: combine_sub_vscale_i32: -; CHECK-NOT: sub -; CHECK-NEXT: rdvl x8, #-1 -; CHECK-NEXT: asr x8, x8, #4 -; CHECK-NEXT: add w0, w0, w8 -; CHECK-NEXT: ret +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #-1 +; CHECK-NEXT: asr x8, x8, #4 +; CHECK-NEXT: add w0, w0, w8 +; CHECK-NEXT: ret %vscale = call i32 @llvm.vscale.i32() %sub = sub i32 %in, %vscale ret i32 %sub @@ -75,12 +77,13 @@ define i32 @combine_sub_vscale_i32(i32 %in) nounwind { ; (sub X, (vscale * C)) to (add X, (vscale * -C)) define i64 @multiple_uses_sub_vscale_i64(i64 %x, i64 %y) nounwind { ; CHECK-LABEL: multiple_uses_sub_vscale_i64: -; CHECK-NEXT: rdvl x8, #1 -; CHECK-NEXT: lsr x8, x8, #4 -; CHECK-NEXT: sub x9, x0, x8 -; CHECK-NEXT: add x8, x1, x8 -; CHECK-NEXT: mul x0, x9, x8 -; CHECK-NEXT: ret +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: sub x9, x0, x8 +; CHECK-NEXT: add x8, x1, x8 +; CHECK-NEXT: mul x0, x9, x8 +; CHECK-NEXT: ret %vscale = call i64 @llvm.vscale.i64() %sub = sub i64 %x, %vscale %add = add i64 %y, %vscale @@ -95,9 +98,9 @@ define i64 @multiple_uses_sub_vscale_i64(i64 %x, i64 %y) nounwind { ; Hence, the immediate for RDVL is #1. 
define i64 @combine_shl_vscale_i64() nounwind { ; CHECK-LABEL: combine_shl_vscale_i64: -; CHECK-NOT: shl -; CHECK-NEXT: rdvl x0, #1 -; CHECK-NEXT: ret +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x0, #1 +; CHECK-NEXT: ret %vscale = call i64 @llvm.vscale.i64() %shl = shl i64 %vscale, 4 ret i64 %shl @@ -105,10 +108,38 @@ define i64 @combine_shl_vscale_i64() nounwind { define i32 @combine_shl_vscale_i32() nounwind { ; CHECK-LABEL: combine_shl_vscale_i32: -; CHECK-NOT: shl -; CHECK-NEXT: rdvl x0, #1 -; CHECK-NEXT: ret +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x0, #1 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: ret %vscale = call i32 @llvm.vscale.i32() %shl = shl i32 %vscale, 4 ret i32 %shl } + +define i64 @combine_shl_mul_vscale(i64 %a) nounwind { +; CHECK-LABEL: combine_shl_mul_vscale: +; CHECK: // %bb.0: +; CHECK-NEXT: cnth x8 +; CHECK-NEXT: mul x0, x0, x8 +; CHECK-NEXT: ret + %vscale = tail call i64 @llvm.vscale.i64() + %mul = mul i64 %a, %vscale + %shl = shl i64 %mul, 3 + ret i64 %shl +} + +define i64 @combine_shl_mul_vscale_commuted(i64 %a) nounwind { +; CHECK-LABEL: combine_shl_mul_vscale_commuted: +; CHECK: // %bb.0: +; CHECK-NEXT: cnth x8 +; CHECK-NEXT: mul x0, x0, x8 +; CHECK-NEXT: ret + %vscale = tail call i64 @llvm.vscale.i64() + %mul = mul i64 %vscale, %a + %shl = shl i64 %mul, 3 + ret i64 %shl +} + +declare i32 @llvm.vscale.i32() +declare i64 @llvm.vscale.i64() diff --git a/llvm/test/CodeGen/AArch64/tbl-loops.ll b/llvm/test/CodeGen/AArch64/tbl-loops.ll index aa0a163..5fc996a 100644 --- a/llvm/test/CodeGen/AArch64/tbl-loops.ll +++ b/llvm/test/CodeGen/AArch64/tbl-loops.ll @@ -63,7 +63,8 @@ define void @loop1(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n ; CHECK-NEXT: fcmp s2, #0.0 ; CHECK-NEXT: fcsel s2, s0, s3, mi ; CHECK-NEXT: subs w10, w10, #1 -; CHECK-NEXT: fcvtzs w11, s2 +; CHECK-NEXT: fcvtzs s2, s2 +; CHECK-NEXT: fmov w11, s2 ; CHECK-NEXT: strb w11, [x9], #1 ; CHECK-NEXT: b.ne .LBB0_7 ; CHECK-NEXT: .LBB0_8: // %for.cond.cleanup @@ -178,12 +179,12 @@ define void @loop2(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n ; CHECK-NEXT: fcmp s3, s1 ; CHECK-NEXT: fcsel s4, s1, s3, gt ; CHECK-NEXT: fcmp s3, #0.0 -; CHECK-NEXT: fcvtzs w11, s2 +; CHECK-NEXT: fcvtzs s2, s2 ; CHECK-NEXT: fcsel s3, s0, s4, mi ; CHECK-NEXT: subs w10, w10, #1 -; CHECK-NEXT: strb w11, [x9] -; CHECK-NEXT: fcvtzs w12, s3 -; CHECK-NEXT: strb w12, [x9, #1] +; CHECK-NEXT: str b2, [x9] +; CHECK-NEXT: fcvtzs s3, s3 +; CHECK-NEXT: stur b3, [x9, #1] ; CHECK-NEXT: add x9, x9, #2 ; CHECK-NEXT: b.ne .LBB1_6 ; CHECK-NEXT: .LBB1_7: // %for.cond.cleanup @@ -395,19 +396,19 @@ define void @loop3(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n ; CHECK-NEXT: fcsel s4, s1, s3, gt ; CHECK-NEXT: fcmp s3, #0.0 ; CHECK-NEXT: ldr s3, [x8, #8] -; CHECK-NEXT: fcvtzs w11, s2 +; CHECK-NEXT: fcvtzs s2, s2 ; CHECK-NEXT: add x8, x8, #12 ; CHECK-NEXT: fcsel s4, s0, s4, mi ; CHECK-NEXT: fcmp s3, s1 -; CHECK-NEXT: strb w11, [x9] +; CHECK-NEXT: str b2, [x9] ; CHECK-NEXT: fcsel s5, s1, s3, gt ; CHECK-NEXT: fcmp s3, #0.0 -; CHECK-NEXT: fcvtzs w12, s4 +; CHECK-NEXT: fcvtzs s4, s4 ; CHECK-NEXT: fcsel s3, s0, s5, mi ; CHECK-NEXT: subs w10, w10, #1 -; CHECK-NEXT: strb w12, [x9, #1] -; CHECK-NEXT: fcvtzs w13, s3 -; CHECK-NEXT: strb w13, [x9, #2] +; CHECK-NEXT: stur b4, [x9, #1] +; CHECK-NEXT: fcvtzs s3, s3 +; CHECK-NEXT: stur b3, [x9, #2] ; CHECK-NEXT: add x9, x9, #3 ; CHECK-NEXT: b.ne .LBB2_8 ; CHECK-NEXT: .LBB2_9: // %for.cond.cleanup @@ -563,26 +564,26 @@ define void @loop4(ptr noalias 
nocapture noundef writeonly %dst, ptr nocapture n ; CHECK-NEXT: fcmp s3, s1 ; CHECK-NEXT: fcsel s4, s1, s3, gt ; CHECK-NEXT: fcmp s3, #0.0 -; CHECK-NEXT: fcvtzs w11, s2 +; CHECK-NEXT: fcvtzs s2, s2 ; CHECK-NEXT: ldp s3, s5, [x8, #8] ; CHECK-NEXT: add x8, x8, #16 ; CHECK-NEXT: fcsel s4, s0, s4, mi ; CHECK-NEXT: fcmp s3, s1 -; CHECK-NEXT: strb w11, [x9] -; CHECK-NEXT: fcvtzs w12, s4 +; CHECK-NEXT: str b2, [x9] +; CHECK-NEXT: fcvtzs s4, s4 ; CHECK-NEXT: fcsel s6, s1, s3, gt ; CHECK-NEXT: fcmp s3, #0.0 ; CHECK-NEXT: fcsel s3, s0, s6, mi ; CHECK-NEXT: fcmp s5, s1 -; CHECK-NEXT: strb w12, [x9, #1] +; CHECK-NEXT: stur b4, [x9, #1] ; CHECK-NEXT: fcsel s6, s1, s5, gt ; CHECK-NEXT: fcmp s5, #0.0 -; CHECK-NEXT: fcvtzs w13, s3 -; CHECK-NEXT: fcsel s2, s0, s6, mi +; CHECK-NEXT: fcvtzs s3, s3 +; CHECK-NEXT: fcsel s5, s0, s6, mi ; CHECK-NEXT: subs w10, w10, #1 -; CHECK-NEXT: strb w13, [x9, #2] -; CHECK-NEXT: fcvtzs w14, s2 -; CHECK-NEXT: strb w14, [x9, #3] +; CHECK-NEXT: stur b3, [x9, #2] +; CHECK-NEXT: fcvtzs s5, s5 +; CHECK-NEXT: stur b5, [x9, #3] ; CHECK-NEXT: add x9, x9, #4 ; CHECK-NEXT: b.ne .LBB3_6 ; CHECK-NEXT: .LBB3_7: // %for.cond.cleanup diff --git a/llvm/test/CodeGen/AArch64/vecreduce-add.ll b/llvm/test/CodeGen/AArch64/vecreduce-add.ll index 290a473..74d1165 100644 --- a/llvm/test/CodeGen/AArch64/vecreduce-add.ll +++ b/llvm/test/CodeGen/AArch64/vecreduce-add.ll @@ -1907,11 +1907,8 @@ define i32 @test_udot_v8i8(<8 x i8> %a, <8 x i8> %b) { ; ; CHECK-GI-BASE-LABEL: test_udot_v8i8: ; CHECK-GI-BASE: // %bb.0: // %entry -; CHECK-GI-BASE-NEXT: ushll v0.8h, v0.8b, #0 -; CHECK-GI-BASE-NEXT: ushll v1.8h, v1.8b, #0 -; CHECK-GI-BASE-NEXT: umull v2.4s, v1.4h, v0.4h -; CHECK-GI-BASE-NEXT: umlal2 v2.4s, v1.8h, v0.8h -; CHECK-GI-BASE-NEXT: addv s0, v2.4s +; CHECK-GI-BASE-NEXT: umull v0.8h, v1.8b, v0.8b +; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h ; CHECK-GI-BASE-NEXT: fmov w0, s0 ; CHECK-GI-BASE-NEXT: ret ; @@ -1952,17 +1949,13 @@ define i32 @test_udot_v16i8(<16 x i8> %a, <16 x i8> %b) { ; ; CHECK-GI-BASE-LABEL: test_udot_v16i8: ; CHECK-GI-BASE: // %bb.0: // %entry -; CHECK-GI-BASE-NEXT: ushll v2.8h, v0.8b, #0 -; CHECK-GI-BASE-NEXT: ushll2 v0.8h, v0.16b, #0 -; CHECK-GI-BASE-NEXT: ushll v3.8h, v1.8b, #0 -; CHECK-GI-BASE-NEXT: ushll2 v1.8h, v1.16b, #0 -; CHECK-GI-BASE-NEXT: umull v4.4s, v3.4h, v2.4h -; CHECK-GI-BASE-NEXT: umull v5.4s, v1.4h, v0.4h -; CHECK-GI-BASE-NEXT: umlal2 v4.4s, v3.8h, v2.8h -; CHECK-GI-BASE-NEXT: umlal2 v5.4s, v1.8h, v0.8h -; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v5.4s -; CHECK-GI-BASE-NEXT: addv s0, v0.4s -; CHECK-GI-BASE-NEXT: fmov w0, s0 +; CHECK-GI-BASE-NEXT: umull v2.8h, v1.8b, v0.8b +; CHECK-GI-BASE-NEXT: umull2 v0.8h, v1.16b, v0.16b +; CHECK-GI-BASE-NEXT: uaddlv s1, v2.8h +; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h +; CHECK-GI-BASE-NEXT: fmov w8, s1 +; CHECK-GI-BASE-NEXT: fmov w9, s0 +; CHECK-GI-BASE-NEXT: add w0, w8, w9 ; CHECK-GI-BASE-NEXT: ret ; ; CHECK-GI-DOT-LABEL: test_udot_v16i8: @@ -2018,36 +2011,21 @@ define i32 @test_udot_v24i8(ptr %p1, ptr %p2) { ; ; CHECK-GI-BASE-LABEL: test_udot_v24i8: ; CHECK-GI-BASE: // %bb.0: // %entry -; CHECK-GI-BASE-NEXT: fmov s0, wzr -; CHECK-GI-BASE-NEXT: fmov s1, wzr -; CHECK-GI-BASE-NEXT: ldr q2, [x0] -; CHECK-GI-BASE-NEXT: ldr d3, [x0, #16] -; CHECK-GI-BASE-NEXT: ldr q4, [x1] -; CHECK-GI-BASE-NEXT: ldr d5, [x1, #16] -; CHECK-GI-BASE-NEXT: ushll v6.8h, v2.8b, #0 -; CHECK-GI-BASE-NEXT: ushll2 v2.8h, v2.16b, #0 -; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr -; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr -; CHECK-GI-BASE-NEXT: ushll v3.8h, v3.8b, #0 -; 
CHECK-GI-BASE-NEXT: ushll v7.8h, v4.8b, #0 -; CHECK-GI-BASE-NEXT: ushll2 v4.8h, v4.16b, #0 -; CHECK-GI-BASE-NEXT: ushll v5.8h, v5.8b, #0 -; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr -; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr -; CHECK-GI-BASE-NEXT: umull v16.4s, v7.4h, v6.4h -; CHECK-GI-BASE-NEXT: umull v17.4s, v4.4h, v2.4h -; CHECK-GI-BASE-NEXT: umull v18.4s, v5.4h, v3.4h -; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr -; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr -; CHECK-GI-BASE-NEXT: umlal2 v16.4s, v7.8h, v6.8h -; CHECK-GI-BASE-NEXT: umlal2 v17.4s, v4.8h, v2.8h -; CHECK-GI-BASE-NEXT: umlal2 v18.4s, v5.8h, v3.8h -; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s -; CHECK-GI-BASE-NEXT: add v1.4s, v16.4s, v17.4s -; CHECK-GI-BASE-NEXT: add v0.4s, v18.4s, v0.4s -; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s -; CHECK-GI-BASE-NEXT: addv s0, v0.4s -; CHECK-GI-BASE-NEXT: fmov w0, s0 +; CHECK-GI-BASE-NEXT: ldr q0, [x0] +; CHECK-GI-BASE-NEXT: ldr q1, [x1] +; CHECK-GI-BASE-NEXT: ldr d2, [x0, #16] +; CHECK-GI-BASE-NEXT: ldr d3, [x1, #16] +; CHECK-GI-BASE-NEXT: umull v4.8h, v1.8b, v0.8b +; CHECK-GI-BASE-NEXT: umull2 v0.8h, v1.16b, v0.16b +; CHECK-GI-BASE-NEXT: umull v1.8h, v3.8b, v2.8b +; CHECK-GI-BASE-NEXT: uaddlv s2, v4.8h +; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h +; CHECK-GI-BASE-NEXT: uaddlv s1, v1.8h +; CHECK-GI-BASE-NEXT: fmov w8, s2 +; CHECK-GI-BASE-NEXT: fmov w9, s0 +; CHECK-GI-BASE-NEXT: add w8, w8, w9 +; CHECK-GI-BASE-NEXT: fmov w9, s1 +; CHECK-GI-BASE-NEXT: add w0, w8, w9 ; CHECK-GI-BASE-NEXT: ret ; ; CHECK-GI-DOT-LABEL: test_udot_v24i8: @@ -2118,61 +2096,33 @@ define i32 @test_udot_v48i8(ptr %p1, ptr %p2) { ; ; CHECK-GI-BASE-LABEL: test_udot_v48i8: ; CHECK-GI-BASE: // %bb.0: // %entry -; CHECK-GI-BASE-NEXT: fmov s0, wzr -; CHECK-GI-BASE-NEXT: fmov s2, wzr -; CHECK-GI-BASE-NEXT: ldr q16, [x0, #32] -; CHECK-GI-BASE-NEXT: fmov s1, wzr -; CHECK-GI-BASE-NEXT: fmov s3, wzr -; CHECK-GI-BASE-NEXT: ldr q19, [x1, #32] -; CHECK-GI-BASE-NEXT: ldp q5, q7, [x1] -; CHECK-GI-BASE-NEXT: ushll v23.8h, v16.8b, #0 -; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr -; CHECK-GI-BASE-NEXT: mov v2.s[1], wzr -; CHECK-GI-BASE-NEXT: ushll v20.8h, v19.8b, #0 -; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr -; CHECK-GI-BASE-NEXT: mov v3.s[1], wzr -; CHECK-GI-BASE-NEXT: ushll2 v19.8h, v19.16b, #0 -; CHECK-GI-BASE-NEXT: ldp q18, q17, [x0] -; CHECK-GI-BASE-NEXT: ushll v4.8h, v5.8b, #0 -; CHECK-GI-BASE-NEXT: ushll2 v5.8h, v5.16b, #0 -; CHECK-GI-BASE-NEXT: ushll v6.8h, v7.8b, #0 -; CHECK-GI-BASE-NEXT: ushll2 v7.8h, v7.16b, #0 -; CHECK-GI-BASE-NEXT: ushll2 v16.8h, v16.16b, #0 -; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr -; CHECK-GI-BASE-NEXT: mov v2.s[2], wzr -; CHECK-GI-BASE-NEXT: ushll v21.8h, v18.8b, #0 -; CHECK-GI-BASE-NEXT: ushll2 v18.8h, v18.16b, #0 -; CHECK-GI-BASE-NEXT: ushll v22.8h, v17.8b, #0 -; CHECK-GI-BASE-NEXT: ushll2 v17.8h, v17.16b, #0 -; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr -; CHECK-GI-BASE-NEXT: mov v3.s[2], wzr -; CHECK-GI-BASE-NEXT: umull v28.4s, v20.4h, v23.4h -; CHECK-GI-BASE-NEXT: umull v29.4s, v19.4h, v16.4h -; CHECK-GI-BASE-NEXT: umull v24.4s, v4.4h, v21.4h -; CHECK-GI-BASE-NEXT: umull v25.4s, v5.4h, v18.4h -; CHECK-GI-BASE-NEXT: umull v26.4s, v6.4h, v22.4h -; CHECK-GI-BASE-NEXT: umull v27.4s, v7.4h, v17.4h -; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr -; CHECK-GI-BASE-NEXT: mov v2.s[3], wzr -; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr -; CHECK-GI-BASE-NEXT: mov v3.s[3], wzr -; CHECK-GI-BASE-NEXT: umlal2 v28.4s, v20.8h, v23.8h -; CHECK-GI-BASE-NEXT: umlal2 v29.4s, v19.8h, v16.8h -; CHECK-GI-BASE-NEXT: umlal2 v24.4s, v4.8h, v21.8h -; CHECK-GI-BASE-NEXT: umlal2 
v25.4s, v5.8h, v18.8h
-; CHECK-GI-BASE-NEXT: umlal2 v26.4s, v6.8h, v22.8h
-; CHECK-GI-BASE-NEXT: umlal2 v27.4s, v7.8h, v17.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v2.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v4.4s, v28.4s, v29.4s
-; CHECK-GI-BASE-NEXT: add v2.4s, v24.4s, v25.4s
-; CHECK-GI-BASE-NEXT: add v3.4s, v26.4s, v27.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v2.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldp q0, q1, [x0]
+; CHECK-GI-BASE-NEXT: ldr q3, [x0, #32]
+; CHECK-GI-BASE-NEXT: ldp q2, q4, [x1]
+; CHECK-GI-BASE-NEXT: ldr q5, [x1, #32]
+; CHECK-GI-BASE-NEXT: umull v7.8h, v5.8b, v3.8b
+; CHECK-GI-BASE-NEXT: umull2 v3.8h, v5.16b, v3.16b
+; CHECK-GI-BASE-NEXT: umull v6.8h, v2.8b, v0.8b
+; CHECK-GI-BASE-NEXT: umull2 v0.8h, v2.16b, v0.16b
+; CHECK-GI-BASE-NEXT: umull2 v2.8h, v4.16b, v1.16b
+; CHECK-GI-BASE-NEXT: umull v1.8h, v4.8b, v1.8b
+; CHECK-GI-BASE-NEXT: uaddlv s5, v7.8h
+; CHECK-GI-BASE-NEXT: uaddlv s3, v3.8h
+; CHECK-GI-BASE-NEXT: uaddlv s4, v6.8h
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: uaddlv s2, v2.8h
+; CHECK-GI-BASE-NEXT: uaddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: fmov w8, s4
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: fmov w10, s2
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s3
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v48i8:
@@ -2225,11 +2175,8 @@ define i32 @test_sdot_v8i8(<8 x i8> %a, <8 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v8i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: smull v2.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: smlal2 v2.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: addv s0, v2.4s
+; CHECK-GI-BASE-NEXT: smull v0.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
; CHECK-GI-BASE-NEXT: fmov w0, s0
; CHECK-GI-BASE-NEXT: ret
;
@@ -2270,17 +2217,13 @@ define i32 @test_sdot_v16i8(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v16i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: sshll v2.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v0.8h, v0.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v3.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v1.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: smull v4.4s, v3.4h, v2.4h
-; CHECK-GI-BASE-NEXT: smull v5.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: smlal2 v4.4s, v3.8h, v2.8h
-; CHECK-GI-BASE-NEXT: smlal2 v5.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v5.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: smull v2.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: smull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: saddlv s1, v2.8h
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s1
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v16i8:
@@ -2336,36 +2279,21 @@ define i32 @test_sdot_v24i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v24i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: ldr q2, [x0]
-; CHECK-GI-BASE-NEXT: ldr d3, [x0, #16]
-; CHECK-GI-BASE-NEXT: ldr q4, [x1]
-; CHECK-GI-BASE-NEXT: ldr d5, [x1, #16]
-; CHECK-GI-BASE-NEXT: sshll v6.8h, v2.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v2.8h, v2.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: sshll v3.8h, v3.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v7.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v4.8h, v4.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v5.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: smull v16.4s, v7.4h, v6.4h
-; CHECK-GI-BASE-NEXT: smull v17.4s, v4.4h, v2.4h
-; CHECK-GI-BASE-NEXT: smull v18.4s, v5.4h, v3.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: smlal2 v16.4s, v7.8h, v6.8h
-; CHECK-GI-BASE-NEXT: smlal2 v17.4s, v4.8h, v2.8h
-; CHECK-GI-BASE-NEXT: smlal2 v18.4s, v5.8h, v3.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v16.4s, v17.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v18.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldr q0, [x0]
+; CHECK-GI-BASE-NEXT: ldr q1, [x1]
+; CHECK-GI-BASE-NEXT: ldr d2, [x0, #16]
+; CHECK-GI-BASE-NEXT: ldr d3, [x1, #16]
+; CHECK-GI-BASE-NEXT: smull v4.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: smull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: smull v1.8h, v3.8b, v2.8b
+; CHECK-GI-BASE-NEXT: saddlv s2, v4.8h
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: saddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s2
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v24i8:
@@ -2436,61 +2364,33 @@ define i32 @test_sdot_v48i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v48i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s2, wzr
-; CHECK-GI-BASE-NEXT: ldr q16, [x0, #32]
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: fmov s3, wzr
-; CHECK-GI-BASE-NEXT: ldr q19, [x1, #32]
-; CHECK-GI-BASE-NEXT: ldp q5, q7, [x1]
-; CHECK-GI-BASE-NEXT: sshll v23.8h, v16.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[1], wzr
-; CHECK-GI-BASE-NEXT: sshll v20.8h, v19.8b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[1], wzr
-; CHECK-GI-BASE-NEXT: sshll2 v19.8h, v19.16b, #0
-; CHECK-GI-BASE-NEXT: ldp q18, q17, [x0]
-; CHECK-GI-BASE-NEXT: sshll v4.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v5.8h, v5.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v6.8h, v7.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v7.8h, v7.16b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v16.8h, v16.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[2], wzr
-; CHECK-GI-BASE-NEXT: sshll v21.8h, v18.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v18.8h, v18.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v22.8h, v17.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v17.8h, v17.16b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[2], wzr
-; CHECK-GI-BASE-NEXT: smull v28.4s, v20.4h, v23.4h
-; CHECK-GI-BASE-NEXT: smull v29.4s, v19.4h, v16.4h
-; CHECK-GI-BASE-NEXT: smull v24.4s, v4.4h, v21.4h
-; CHECK-GI-BASE-NEXT: smull v25.4s, v5.4h, v18.4h
-; CHECK-GI-BASE-NEXT: smull v26.4s, v6.4h, v22.4h
-; CHECK-GI-BASE-NEXT: smull v27.4s, v7.4h, v17.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[3], wzr
-; CHECK-GI-BASE-NEXT: smlal2 v28.4s, v20.8h, v23.8h
-; CHECK-GI-BASE-NEXT: smlal2 v29.4s, v19.8h, v16.8h
-; CHECK-GI-BASE-NEXT: smlal2 v24.4s, v4.8h, v21.8h
-; CHECK-GI-BASE-NEXT: smlal2 v25.4s, v5.8h, v18.8h
-; CHECK-GI-BASE-NEXT: smlal2 v26.4s, v6.8h, v22.8h
-; CHECK-GI-BASE-NEXT: smlal2 v27.4s, v7.8h, v17.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v2.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v4.4s, v28.4s, v29.4s
-; CHECK-GI-BASE-NEXT: add v2.4s, v24.4s, v25.4s
-; CHECK-GI-BASE-NEXT: add v3.4s, v26.4s, v27.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v2.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldp q0, q1, [x0]
+; CHECK-GI-BASE-NEXT: ldr q3, [x0, #32]
+; CHECK-GI-BASE-NEXT: ldp q2, q4, [x1]
+; CHECK-GI-BASE-NEXT: ldr q5, [x1, #32]
+; CHECK-GI-BASE-NEXT: smull v7.8h, v5.8b, v3.8b
+; CHECK-GI-BASE-NEXT: smull2 v3.8h, v5.16b, v3.16b
+; CHECK-GI-BASE-NEXT: smull v6.8h, v2.8b, v0.8b
+; CHECK-GI-BASE-NEXT: smull2 v0.8h, v2.16b, v0.16b
+; CHECK-GI-BASE-NEXT: smull2 v2.8h, v4.16b, v1.16b
+; CHECK-GI-BASE-NEXT: smull v1.8h, v4.8b, v1.8b
+; CHECK-GI-BASE-NEXT: saddlv s5, v7.8h
+; CHECK-GI-BASE-NEXT: saddlv s3, v3.8h
+; CHECK-GI-BASE-NEXT: saddlv s4, v6.8h
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: saddlv s2, v2.8h
+; CHECK-GI-BASE-NEXT: saddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: fmov w8, s4
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: fmov w10, s2
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s3
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v48i8:
@@ -2549,18 +2449,27 @@ define i32 @test_udot_v8i8_multi_use(<8 x i8> %a, <8 x i8> %b) {
; CHECK-SD-DOT-NEXT: add w0, w8, w9
; CHECK-SD-DOT-NEXT: ret
;
-; CHECK-GI-LABEL: test_udot_v8i8_multi_use:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: umull v2.4s, v1.4h, v0.4h
-; CHECK-GI-NEXT: mov v3.16b, v2.16b
-; CHECK-GI-NEXT: fmov w8, s2
-; CHECK-GI-NEXT: umlal2 v3.4s, v1.8h, v0.8h
-; CHECK-GI-NEXT: addv s0, v3.4s
-; CHECK-GI-NEXT: fmov w9, s0
-; CHECK-GI-NEXT: add w0, w9, w8
-; CHECK-GI-NEXT: ret
+; CHECK-GI-BASE-LABEL: test_udot_v8i8_multi_use:
+; CHECK-GI-BASE: // %bb.0: // %entry
+; CHECK-GI-BASE-NEXT: umull v0.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: uaddlv s1, v0.8h
+; CHECK-GI-BASE-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: fmov w8, s1
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
+; CHECK-GI-BASE-NEXT: ret
+;
+; CHECK-GI-DOT-LABEL: test_udot_v8i8_multi_use:
+; CHECK-GI-DOT: // %bb.0: // %entry
+; CHECK-GI-DOT-NEXT: movi v2.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: umull v3.8h, v1.8b, v0.8b
+; CHECK-GI-DOT-NEXT: udot v2.2s, v1.8b, v0.8b
+; CHECK-GI-DOT-NEXT: ushll v0.4s, v3.4h, #0
+; CHECK-GI-DOT-NEXT: fmov w9, s0
+; CHECK-GI-DOT-NEXT: addp v1.2s, v2.2s, v2.2s
+; CHECK-GI-DOT-NEXT: fmov w8, s1
+; CHECK-GI-DOT-NEXT: add w0, w8, w9
+; CHECK-GI-DOT-NEXT: ret
entry:
%0 = zext <8 x i8> %a to <8 x i32>
%1 = zext <8 x i8> %b to <8 x i32>